18c2ecf20Sopenharmony_ci/***********************license start***************
28c2ecf20Sopenharmony_ci * Author: Cavium Networks
38c2ecf20Sopenharmony_ci *
48c2ecf20Sopenharmony_ci * Contact: support@caviumnetworks.com
58c2ecf20Sopenharmony_ci * This file is part of the OCTEON SDK
68c2ecf20Sopenharmony_ci *
78c2ecf20Sopenharmony_ci * Copyright (c) 2003-2008 Cavium Networks
88c2ecf20Sopenharmony_ci *
98c2ecf20Sopenharmony_ci * This file is free software; you can redistribute it and/or modify
108c2ecf20Sopenharmony_ci * it under the terms of the GNU General Public License, Version 2, as
118c2ecf20Sopenharmony_ci * published by the Free Software Foundation.
128c2ecf20Sopenharmony_ci *
138c2ecf20Sopenharmony_ci * This file is distributed in the hope that it will be useful, but
148c2ecf20Sopenharmony_ci * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
158c2ecf20Sopenharmony_ci * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
168c2ecf20Sopenharmony_ci * NONINFRINGEMENT.  See the GNU General Public License for more
178c2ecf20Sopenharmony_ci * details.
188c2ecf20Sopenharmony_ci *
198c2ecf20Sopenharmony_ci * You should have received a copy of the GNU General Public License
208c2ecf20Sopenharmony_ci * along with this file; if not, write to the Free Software
218c2ecf20Sopenharmony_ci * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
228c2ecf20Sopenharmony_ci * or visit http://www.gnu.org/licenses/.
238c2ecf20Sopenharmony_ci *
248c2ecf20Sopenharmony_ci * This file may also be available under a different license from Cavium.
258c2ecf20Sopenharmony_ci * Contact Cavium Networks for more information
268c2ecf20Sopenharmony_ci ***********************license end**************************************/
278c2ecf20Sopenharmony_ci
288c2ecf20Sopenharmony_ci/**
298c2ecf20Sopenharmony_ci * @file
308c2ecf20Sopenharmony_ci *
318c2ecf20Sopenharmony_ci * Interface to the hardware Free Pool Allocator.
328c2ecf20Sopenharmony_ci *
338c2ecf20Sopenharmony_ci *
348c2ecf20Sopenharmony_ci */
358c2ecf20Sopenharmony_ci
368c2ecf20Sopenharmony_ci#ifndef __CVMX_FPA_H__
378c2ecf20Sopenharmony_ci#define __CVMX_FPA_H__
388c2ecf20Sopenharmony_ci
398c2ecf20Sopenharmony_ci#include <linux/delay.h>
408c2ecf20Sopenharmony_ci
418c2ecf20Sopenharmony_ci#include <asm/octeon/cvmx-address.h>
428c2ecf20Sopenharmony_ci#include <asm/octeon/cvmx-fpa-defs.h>
438c2ecf20Sopenharmony_ci
448c2ecf20Sopenharmony_ci#define CVMX_FPA_NUM_POOLS	8
458c2ecf20Sopenharmony_ci#define CVMX_FPA_MIN_BLOCK_SIZE 128
468c2ecf20Sopenharmony_ci#define CVMX_FPA_ALIGNMENT	128
478c2ecf20Sopenharmony_ci
488c2ecf20Sopenharmony_ci/**
498c2ecf20Sopenharmony_ci * Structure describing the data format used for stores to the FPA.
508c2ecf20Sopenharmony_ci */
/**
 * Structure describing the data format used for stores to the FPA.
 *
 * The union lets the 64-bit IOBDMA command word be built via the
 * bitfields in @s and then sent as the raw @u64 value.  The two
 * #ifdef halves describe the identical layout for big- and
 * little-endian bitfield ordering; only the declaration order
 * differs.  The four fields together cover all 64 bits.
 */
typedef union {
	/* Raw 64-bit command word actually handed to the hardware */
	uint64_t u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		/*
		 * the (64-bit word) location in scratchpad to write
		 * to (if len != 0)
		 */
		uint64_t scraddr:8;
		/* the number of words in the response (0 => no response) */
		uint64_t len:8;
		/* the ID of the device on the non-coherent bus */
		uint64_t did:8;
		/*
		 * the address that will appear in the first tick on
		 * the NCB bus.
		 */
		uint64_t addr:40;
#else
		/* Same fields, reversed for little-endian bitfield order */
		uint64_t addr:40;
		uint64_t did:8;
		uint64_t len:8;
		uint64_t scraddr:8;
#endif
	} s;
} cvmx_fpa_iobdma_data_t;
778c2ecf20Sopenharmony_ci
788c2ecf20Sopenharmony_ci/**
798c2ecf20Sopenharmony_ci * Structure describing the current state of a FPA pool.
808c2ecf20Sopenharmony_ci */
/**
 * Structure describing the current state of a FPA pool.
 *
 * One entry per hardware pool is kept in the global
 * cvmx_fpa_pool_info[] array; the accessor functions below read
 * these fields to answer name/base/membership queries without
 * touching the hardware.
 */
typedef struct {
	/* Name it was created under (string is referenced, not copied) */
	const char *name;
	/* Size of each block in bytes */
	uint64_t size;
	/* The base memory address of whole block */
	void *base;
	/* The number of elements in the pool at creation */
	uint64_t starting_element_count;
} cvmx_fpa_pool_info_t;
918c2ecf20Sopenharmony_ci
928c2ecf20Sopenharmony_ci/**
938c2ecf20Sopenharmony_ci * Current state of all the pools. Use access functions
948c2ecf20Sopenharmony_ci * instead of using it directly.
958c2ecf20Sopenharmony_ci */
968c2ecf20Sopenharmony_ciextern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
978c2ecf20Sopenharmony_ci
988c2ecf20Sopenharmony_ci/* CSR typedefs have been moved to cvmx-csr-*.h */
998c2ecf20Sopenharmony_ci
1008c2ecf20Sopenharmony_ci/**
1018c2ecf20Sopenharmony_ci * Return the name of the pool
1028c2ecf20Sopenharmony_ci *
1038c2ecf20Sopenharmony_ci * @pool:   Pool to get the name of
1048c2ecf20Sopenharmony_ci * Returns The name
1058c2ecf20Sopenharmony_ci */
1068c2ecf20Sopenharmony_cistatic inline const char *cvmx_fpa_get_name(uint64_t pool)
1078c2ecf20Sopenharmony_ci{
1088c2ecf20Sopenharmony_ci	return cvmx_fpa_pool_info[pool].name;
1098c2ecf20Sopenharmony_ci}
1108c2ecf20Sopenharmony_ci
1118c2ecf20Sopenharmony_ci/**
1128c2ecf20Sopenharmony_ci * Return the base of the pool
1138c2ecf20Sopenharmony_ci *
1148c2ecf20Sopenharmony_ci * @pool:   Pool to get the base of
1158c2ecf20Sopenharmony_ci * Returns The base
1168c2ecf20Sopenharmony_ci */
1178c2ecf20Sopenharmony_cistatic inline void *cvmx_fpa_get_base(uint64_t pool)
1188c2ecf20Sopenharmony_ci{
1198c2ecf20Sopenharmony_ci	return cvmx_fpa_pool_info[pool].base;
1208c2ecf20Sopenharmony_ci}
1218c2ecf20Sopenharmony_ci
1228c2ecf20Sopenharmony_ci/**
1238c2ecf20Sopenharmony_ci * Check if a pointer belongs to an FPA pool. Return non-zero
1248c2ecf20Sopenharmony_ci * if the supplied pointer is inside the memory controlled by
1258c2ecf20Sopenharmony_ci * an FPA pool.
1268c2ecf20Sopenharmony_ci *
1278c2ecf20Sopenharmony_ci * @pool:   Pool to check
1288c2ecf20Sopenharmony_ci * @ptr:    Pointer to check
1298c2ecf20Sopenharmony_ci * Returns Non-zero if pointer is in the pool. Zero if not
1308c2ecf20Sopenharmony_ci */
1318c2ecf20Sopenharmony_cistatic inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
1328c2ecf20Sopenharmony_ci{
1338c2ecf20Sopenharmony_ci	return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
1348c2ecf20Sopenharmony_ci		((char *)ptr <
1358c2ecf20Sopenharmony_ci		 ((char *)(cvmx_fpa_pool_info[pool].base)) +
1368c2ecf20Sopenharmony_ci		 cvmx_fpa_pool_info[pool].size *
1378c2ecf20Sopenharmony_ci		 cvmx_fpa_pool_info[pool].starting_element_count));
1388c2ecf20Sopenharmony_ci}
1398c2ecf20Sopenharmony_ci
/**
 * Enable the FPA for use. Must be performed after any CSR
 * configuration but before any other FPA functions.
 *
 * Applies a pass1-chip workaround (raising the FPF write marks for
 * queues 1-7) before setting the enable bit in FPA_CTL_STATUS.
 */
static inline void cvmx_fpa_enable(void)
{
	union cvmx_fpa_ctl_status status;

	/* Read current state only to warn about a double enable */
	status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
	if (status.s.enb) {
		cvmx_dprintf
		    ("Warning: Enabling FPA when FPA already enabled.\n");
	}

	/*
	 * Do runtime check as we allow pass1 compiled code to run on
	 * pass2 chips.
	 */
	if (cvmx_octeon_is_pass1()) {
		union cvmx_fpa_fpfx_marks marks;
		int i;
		/*
		 * Raise the write mark to 0xe0 on each of the FPF
		 * queues 1-7 (the MARKS registers are 8 bytes apart,
		 * starting at FPF1).
		 */
		for (i = 1; i < 8; i++) {
			marks.u64 =
			    cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull);
			marks.s.fpf_wr = 0xe0;
			cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull,
				       marks.u64);
		}

		/* Enforce a 10 cycle delay between config and enable */
		__delay(10);
	}

	/* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
	status.u64 = 0;
	status.s.enb = 1;
	cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
}
1788c2ecf20Sopenharmony_ci
1798c2ecf20Sopenharmony_ci/**
1808c2ecf20Sopenharmony_ci * Get a new block from the FPA
1818c2ecf20Sopenharmony_ci *
1828c2ecf20Sopenharmony_ci * @pool:   Pool to get the block from
1838c2ecf20Sopenharmony_ci * Returns Pointer to the block or NULL on failure
1848c2ecf20Sopenharmony_ci */
1858c2ecf20Sopenharmony_cistatic inline void *cvmx_fpa_alloc(uint64_t pool)
1868c2ecf20Sopenharmony_ci{
1878c2ecf20Sopenharmony_ci	uint64_t address =
1888c2ecf20Sopenharmony_ci	    cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
1898c2ecf20Sopenharmony_ci	if (address)
1908c2ecf20Sopenharmony_ci		return cvmx_phys_to_ptr(address);
1918c2ecf20Sopenharmony_ci	else
1928c2ecf20Sopenharmony_ci		return NULL;
1938c2ecf20Sopenharmony_ci}
1948c2ecf20Sopenharmony_ci
1958c2ecf20Sopenharmony_ci/**
1968c2ecf20Sopenharmony_ci * Asynchronously get a new block from the FPA
1978c2ecf20Sopenharmony_ci *
1988c2ecf20Sopenharmony_ci * @scr_addr: Local scratch address to put response in.	 This is a byte address,
1998c2ecf20Sopenharmony_ci *		    but must be 8 byte aligned.
2008c2ecf20Sopenharmony_ci * @pool:      Pool to get the block from
2018c2ecf20Sopenharmony_ci */
2028c2ecf20Sopenharmony_cistatic inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
2038c2ecf20Sopenharmony_ci{
2048c2ecf20Sopenharmony_ci	cvmx_fpa_iobdma_data_t data;
2058c2ecf20Sopenharmony_ci
2068c2ecf20Sopenharmony_ci	/*
2078c2ecf20Sopenharmony_ci	 * Hardware only uses 64 bit aligned locations, so convert
2088c2ecf20Sopenharmony_ci	 * from byte address to 64-bit index
2098c2ecf20Sopenharmony_ci	 */
2108c2ecf20Sopenharmony_ci	data.s.scraddr = scr_addr >> 3;
2118c2ecf20Sopenharmony_ci	data.s.len = 1;
2128c2ecf20Sopenharmony_ci	data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
2138c2ecf20Sopenharmony_ci	data.s.addr = 0;
2148c2ecf20Sopenharmony_ci	cvmx_send_single(data.u64);
2158c2ecf20Sopenharmony_ci}
2168c2ecf20Sopenharmony_ci
2178c2ecf20Sopenharmony_ci/**
2188c2ecf20Sopenharmony_ci * Free a block allocated with a FPA pool.  Does NOT provide memory
2198c2ecf20Sopenharmony_ci * ordering in cases where the memory block was modified by the core.
2208c2ecf20Sopenharmony_ci *
2218c2ecf20Sopenharmony_ci * @ptr:    Block to free
2228c2ecf20Sopenharmony_ci * @pool:   Pool to put it in
2238c2ecf20Sopenharmony_ci * @num_cache_lines:
2248c2ecf20Sopenharmony_ci *		 Cache lines to invalidate
2258c2ecf20Sopenharmony_ci */
2268c2ecf20Sopenharmony_cistatic inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
2278c2ecf20Sopenharmony_ci					uint64_t num_cache_lines)
2288c2ecf20Sopenharmony_ci{
2298c2ecf20Sopenharmony_ci	cvmx_addr_t newptr;
2308c2ecf20Sopenharmony_ci	newptr.u64 = cvmx_ptr_to_phys(ptr);
2318c2ecf20Sopenharmony_ci	newptr.sfilldidspace.didspace =
2328c2ecf20Sopenharmony_ci	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
2338c2ecf20Sopenharmony_ci	/* Prevent GCC from reordering around free */
2348c2ecf20Sopenharmony_ci	barrier();
2358c2ecf20Sopenharmony_ci	/* value written is number of cache lines not written back */
2368c2ecf20Sopenharmony_ci	cvmx_write_io(newptr.u64, num_cache_lines);
2378c2ecf20Sopenharmony_ci}
2388c2ecf20Sopenharmony_ci
2398c2ecf20Sopenharmony_ci/**
2408c2ecf20Sopenharmony_ci * Free a block allocated with a FPA pool.  Provides required memory
2418c2ecf20Sopenharmony_ci * ordering in cases where memory block was modified by core.
2428c2ecf20Sopenharmony_ci *
2438c2ecf20Sopenharmony_ci * @ptr:    Block to free
2448c2ecf20Sopenharmony_ci * @pool:   Pool to put it in
2458c2ecf20Sopenharmony_ci * @num_cache_lines:
2468c2ecf20Sopenharmony_ci *		 Cache lines to invalidate
2478c2ecf20Sopenharmony_ci */
2488c2ecf20Sopenharmony_cistatic inline void cvmx_fpa_free(void *ptr, uint64_t pool,
2498c2ecf20Sopenharmony_ci				 uint64_t num_cache_lines)
2508c2ecf20Sopenharmony_ci{
2518c2ecf20Sopenharmony_ci	cvmx_addr_t newptr;
2528c2ecf20Sopenharmony_ci	newptr.u64 = cvmx_ptr_to_phys(ptr);
2538c2ecf20Sopenharmony_ci	newptr.sfilldidspace.didspace =
2548c2ecf20Sopenharmony_ci	    CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
2558c2ecf20Sopenharmony_ci	/*
2568c2ecf20Sopenharmony_ci	 * Make sure that any previous writes to memory go out before
2578c2ecf20Sopenharmony_ci	 * we free this buffer.	 This also serves as a barrier to
2588c2ecf20Sopenharmony_ci	 * prevent GCC from reordering operations to after the
2598c2ecf20Sopenharmony_ci	 * free.
2608c2ecf20Sopenharmony_ci	 */
2618c2ecf20Sopenharmony_ci	CVMX_SYNCWS;
2628c2ecf20Sopenharmony_ci	/* value written is number of cache lines not written back */
2638c2ecf20Sopenharmony_ci	cvmx_write_io(newptr.u64, num_cache_lines);
2648c2ecf20Sopenharmony_ci}
2658c2ecf20Sopenharmony_ci
2668c2ecf20Sopenharmony_ci/**
2678c2ecf20Sopenharmony_ci * Setup a FPA pool to control a new block of memory.
2688c2ecf20Sopenharmony_ci * This can only be called once per pool. Make sure proper
2698c2ecf20Sopenharmony_ci * locking enforces this.
2708c2ecf20Sopenharmony_ci *
2718c2ecf20Sopenharmony_ci * @pool:	Pool to initialize
2728c2ecf20Sopenharmony_ci *		     0 <= pool < 8
2738c2ecf20Sopenharmony_ci * @name:	Constant character string to name this pool.
2748c2ecf20Sopenharmony_ci *		     String is not copied.
2758c2ecf20Sopenharmony_ci * @buffer:	Pointer to the block of memory to use. This must be
2768c2ecf20Sopenharmony_ci *		     accessible by all processors and external hardware.
2778c2ecf20Sopenharmony_ci * @block_size: Size for each block controlled by the FPA
2788c2ecf20Sopenharmony_ci * @num_blocks: Number of blocks
2798c2ecf20Sopenharmony_ci *
2808c2ecf20Sopenharmony_ci * Returns 0 on Success,
2818c2ecf20Sopenharmony_ci *	   -1 on failure
2828c2ecf20Sopenharmony_ci */
2838c2ecf20Sopenharmony_ciextern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
2848c2ecf20Sopenharmony_ci			       uint64_t block_size, uint64_t num_blocks);
2858c2ecf20Sopenharmony_ci
2868c2ecf20Sopenharmony_ci/**
2878c2ecf20Sopenharmony_ci * Shutdown a Memory pool and validate that it had all of
2888c2ecf20Sopenharmony_ci * the buffers originally placed in it. This should only be
2898c2ecf20Sopenharmony_ci * called by one processor after all hardware has finished
2908c2ecf20Sopenharmony_ci * using the pool.
2918c2ecf20Sopenharmony_ci *
2928c2ecf20Sopenharmony_ci * @pool:   Pool to shutdown
2938c2ecf20Sopenharmony_ci * Returns Zero on success
2948c2ecf20Sopenharmony_ci *	   - Positive is count of missing buffers
2958c2ecf20Sopenharmony_ci *	   - Negative is too many buffers or corrupted pointers
2968c2ecf20Sopenharmony_ci */
2978c2ecf20Sopenharmony_ciextern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
2988c2ecf20Sopenharmony_ci
2998c2ecf20Sopenharmony_ci/**
3008c2ecf20Sopenharmony_ci * Get the size of blocks controlled by the pool
3018c2ecf20Sopenharmony_ci * This is resolved to a constant at compile time.
3028c2ecf20Sopenharmony_ci *
3038c2ecf20Sopenharmony_ci * @pool:   Pool to access
3048c2ecf20Sopenharmony_ci * Returns Size of the block in bytes
3058c2ecf20Sopenharmony_ci */
3068c2ecf20Sopenharmony_ciuint64_t cvmx_fpa_get_block_size(uint64_t pool);
3078c2ecf20Sopenharmony_ci
#endif /*  __CVMX_FPA_H__ */
309