/*
 * Machines known to misbehave when the SCM SHM bridge is enabled; on these
 * platforms the driver falls back to plain (non-bridged) shared memory.
 * The array is NULL-terminated so it can be walked with a simple pointer loop.
 *
 * Fix: the mangled "staticconstchar" token sequence would not compile;
 * restored to "static const char" and reformatted to kernel style.
 */
static const char *const qcom_tzmem_blacklist[] = {
	"qcom,sc7180",	/* hang in rmtfs memory assignment */
	"qcom,sc8180x",
	"qcom,sdm670",	/* failure in GPU firmware loading */
	"qcom,sdm845",	/* reset in rmtfs memory assignment */
	"qcom,sm7150",	/* reset in rmtfs memory assignment */
	"qcom,sm8150",	/* reset in rmtfs memory assignment */
	NULL
};
/*
 * qcom_tzmem_init() - Decide whether to use the SCM SHM bridge.
 *
 * Walks the NULL-terminated blacklist first; a compatible machine means the
 * bridge is known-broken there, so we report "not supported" and succeed
 * without enabling it. Otherwise the bridge is enabled via SCM; -EOPNOTSUPP
 * from firmware is likewise treated as the (non-fatal) unsupported case.
 * On success, qcom_tzmem_using_shm_bridge records that the bridge is active.
 *
 * Return: 0 on success or when the bridge is unsupported, a negative error
 * code if enabling the bridge failed for any other reason.
 *
 * Fix: the mangled "staticint" / "constchar" token sequences would not
 * compile; restored proper spacing and kernel formatting.
 */
static int qcom_tzmem_init(void)
{
	const char *const *platform;
	int ret;

	for (platform = qcom_tzmem_blacklist; *platform; platform++) {
		if (of_machine_is_compatible(*platform))
			goto notsupp;
	}

	ret = qcom_scm_shm_bridge_enable(qcom_tzmem_dev);
	if (ret == -EOPNOTSUPP)
		goto notsupp;

	if (!ret)
		qcom_tzmem_using_shm_bridge = true;

	return ret;

notsupp:
	dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n");
	return 0;
}
/** * qcom_tzmem_pool_new() - Create a new TZ memory pool. * @config: Pool configuration. * * Create a new pool of memory suitable for sharing with the TrustZone. * * Must not be used in atomic context. * * Return: New memory pool address or ERR_PTR() on error.
*/ struct qcom_tzmem_pool *
/* NOTE(review): "conststruct" is a whitespace-mangling artifact; the
 * original presumably reads "const struct". Restore before compiling. */
qcom_tzmem_pool_new(conststruct qcom_tzmem_pool_config *config)
{ int ret = -ENOMEM;
might_sleep();
/* Validate the growth-policy configuration up front: STATIC needs an
 * initial size, MULTIPLIER needs an increment, ON_DEMAND needs nothing;
 * any other policy value is rejected with -EINVAL. */
switch (config->policy) { case QCOM_TZMEM_POLICY_STATIC: if (!config->initial_size) return ERR_PTR(-EINVAL); break; case QCOM_TZMEM_POLICY_MULTIPLIER: if (!config->increment) return ERR_PTR(-EINVAL); break; case QCOM_TZMEM_POLICY_ON_DEMAND: break; default: return ERR_PTR(-EINVAL);
}
/* NOTE(review): the function body is truncated here by the extraction --
 * only the validation switch survives; the pool allocation that must
 * follow (and the closing brace) is missing from this chunk. Recover it
 * from the original source; do not treat this span as complete. */
/** * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources. * @pool: Memory pool to free. * * Must not be called if any of the allocated chunks has not been freed. * Must not be used in atomic context.
*/ void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool)
{ struct qcom_tzmem_area *area, *next; struct qcom_tzmem_chunk *chunk; struct radix_tree_iter iter; bool non_empty = false; void __rcu **slot;
/* NOTE(review): body truncated by the extraction -- only the local
 * variable declarations survive; the teardown logic and the closing
 * brace are missing from this chunk. Recover from the original source. */
/** * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new(). * @dev: Device managing this resource. * @config: Pool configuration. * * Must not be used in atomic context. * * Return: Address of the managed pool or ERR_PTR() on failure.
*/ struct qcom_tzmem_pool *
/* NOTE(review): "conststruct" is a whitespace-mangling artifact; the
 * original presumably reads "const struct". */
devm_qcom_tzmem_pool_new(struct device *dev, conststruct qcom_tzmem_pool_config *config)
{ struct qcom_tzmem_pool *pool; int ret;
/* Create the pool, then tie its lifetime to @dev: the devres action
 * frees it automatically on driver detach (or immediately on failure). */
pool = qcom_tzmem_pool_new(config); if (IS_ERR(pool)) return pool;
ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool); if (ret) return ERR_PTR(ret);
/* NOTE(review): truncated -- the final "return pool;" and the closing
 * brace are missing from this chunk; recover from the original source. */
/** * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ. * @pool: TZ memory pool from which to allocate memory. * @size: Number of bytes to allocate. * @gfp: GFP flags. * * Can be used in any context. * * Return: * Address of the allocated buffer or NULL if no more memory can be allocated. * The buffer must be released using qcom_tzmem_free().
*/ void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp)
/* NOTE(review): "unsignedlong" is a whitespace-mangling artifact for
 * "unsigned long". The body is truncated after these declarations --
 * the allocation logic and closing brace are missing from this chunk. */
{ unsignedlong vaddr; int ret;
/** * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool. * @vaddr: Virtual address of the buffer. * * Can be used in any context.
*/ void qcom_tzmem_free(void *vaddr)
{ struct qcom_tzmem_chunk *chunk;
/* NOTE(review): body truncated by the extraction -- only the opening
 * declaration survives; the freeing logic and closing brace are missing
 * from this chunk. Recover from the original source. */
/** * qcom_tzmem_to_phys() - Map the virtual address of TZ memory to physical. * @vaddr: Virtual address of memory allocated from a TZ memory pool. * * Can be used in any context. The address must point to memory allocated * using qcom_tzmem_alloc(). * * Returns: * Physical address mapped from the virtual or 0 if the mapping failed.
*/
phys_addr_t qcom_tzmem_to_phys(void *vaddr)
{ struct qcom_tzmem_chunk *chunk; struct radix_tree_iter iter; void __rcu **slot;
phys_addr_t ret;
/* NOTE(review): body truncated by the extraction -- only the local
 * declarations survive; the radix-tree lookup and the closing brace are
 * missing from this chunk. Recover from the original source. */
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.