@book{Nielsen-book,
  author    = {Nielsen, M. A. and Chuang, I. L.},
  title     = {Quantum Computation and Quantum Information},
  publisher = {Cambridge University Press},
  year      = {2000},
  address   = {Cambridge, UK},
}
@article{Calderbank-1997,
  author   = {Calderbank, A. R. and Rains, E. M. and Shor, P. W. and
              Sloane, N. J. A.},
  title    = {Quantum Error Correction via Codes over {GF(4)}},
  journal  = {IEEE Trans. Inf. Theory},
  year     = {1998},
  volume   = {44},
  number   = {4},
  pages    = {1369--1387},
  doi      = {10.1109/18.681315},
  abstract = {The problem of finding quantum error correcting codes is
              transformed into the problem of finding additive codes over
              the field GF(4) which are self-orthogonal with respect to a
              certain trace inner product. Many new codes and new bounds
              are presented, as well as a table of upper and lower bounds
              on such codes of length up to 30 qubits.},
}
@phdthesis{gottesman-thesis,
  author        = {Gottesman, Daniel},
  title         = {Stabilizer Codes and Quantum Error Correction},
  school        = {California Institute of Technology},
  year          = {1997},
  eprint        = {quant-ph/9705052},
  archiveprefix = {arXiv},
  url           = {https://arxiv.org/abs/quant-ph/9705052},
  annote        = {Controlling operational errors and decoherence is one
                   of the major challenges facing the field of quantum
                   computation and other attempts to create specified
                   many-particle entangled states. The field of quantum
                   error correction has developed to meet this
                   challenge. A group-theoretical structure and associated
                   subclass of quantum codes, the stabilizer codes, has
                   proved particularly fruitful in producing codes and in
                   understanding the structure of both specific codes and
                   classes of codes. I will give an overview of the field
                   of quantum error correction and the formalism of
                   stabilizer codes. In the context of stabilizer codes, I
                   will discuss a number of known codes, the capacity of a
                   quantum channel, bounds on quantum codes, and
                   fault-tolerant quantum computation.},
}
@article{Ashikhmin-Knill-2001,
  author   = {Ashikhmin, A. and Knill, E.},
  title    = {Nonbinary Quantum Stabilizer Codes},
  journal  = {IEEE Trans. Inf. Theory},
  year     = {2001},
  month    = nov,
  volume   = {47},
  number   = {7},
  pages    = {3065--3072},
  doi      = {10.1109/18.959288},
  abstract = {We define and show how to construct nonbinary quantum
              stabilizer codes. Our approach is based on nonbinary error
              bases. It generalizes the relationship between
              self-orthogonal codes over $F_4$ and binary quantum codes to
              one between self-orthogonal codes over $F_{q^2}$ and $q$-ary
              quantum codes for any prime power $q$.},
  keywords = {Galois fields;binary codes;quantum communication;binary
              quantum codes;coding theory;encoding algorithms;nonbinary
              error bases;nonbinary quantum stabilizer codes;q-ary quantum
              codes;self-orthogonal codes;Algorithm design and
              analysis;Code standards;Encoding;Fault tolerance;Galois
              fields;Linear code;Quantum computing;Quantum
              mechanics;Rain;Relativistic quantum mechanics},
}
@article{Ketkar-Klappenecker-Kumar-Sarvepalli-2006,
  author        = {Ketkar, A. and Klappenecker, A. and Kumar, S. and
                   Sarvepalli, P. K.},
  title         = {Nonbinary Stabilizer Codes Over Finite Fields},
  journal       = {{IEEE} Trans. Inf. Theory},
  year          = {2006},
  month         = nov,
  volume        = {52},
  number        = {11},
  pages         = {4892--4914},
  doi           = {10.1109/TIT.2006.883612},
  eprint        = {quant-ph/0508070},
  archiveprefix = {arXiv},
  abstract      = {One formidable difficulty in quantum communication and
                   computation is to protect information-carrying quantum
                   states against undesired interactions with the
                   environment. To address this difficulty, many good
                   quantum error-correcting codes have been derived as
                   binary stabilizer codes. Fault-tolerant quantum
                   computation prompted the study of nonbinary quantum
                   codes, but the theory of such codes is not as advanced
                   as that of binary quantum codes. This paper describes
                   the basic theory of stabilizer codes over finite
                   fields. The relation between stabilizer codes and
                   general quantum codes is clarified by introducing a
                   Galois theory for these objects. A characterization of
                   nonbinary stabilizer codes over $F_q$ in terms of
                   classical codes over $F_{q^2}$ is provided that
                   generalizes the well-known notion of additive codes
                   over $F_4$ of the binary case. This paper also derives
                   lower and upper bounds on the minimum distance of
                   stabilizer codes, gives several code constructions, and
                   derives numerous families of stabilizer codes,
                   including quantum Hamming codes, quadratic residue
                   codes, quantum Melas codes, quantum
                   Bose-Chaudhuri-Hocquenghem (BCH) codes, and quantum
                   character codes. The puncturing theory by Rains is
                   generalized to additive codes that are not necessarily
                   pure. Bounds on the maximal length of maximum distance
                   separable stabilizer codes are given. A discussion of
                   open problems concludes this paper.},
  keywords      = {BCH codes;Galois fields;Hamming codes;error correction
                   codes;fault tolerant computing;quantum
                   communication;quantum computing;residue
                   codes;BCH;Bose-Chaudhuri-Hocquenghem code;Galois
                   theory;Hamming code;additive code;error-correcting
                   code;fault-tolerant quantum computation;finite
                   field;minimum distance;nonbinary stabilizer
                   code;puncturing theory;quadratic residue code;quantum
                   Melas code;quantum character code;quantum
                   communication;Computer science;Error correction
                   codes;Fault tolerance;Galois fields;Information
                   processing;Protection;Quantum computing;Quantum
                   mechanics;Rain;Upper bound;Bose-Chaudhuri-Hocquenghem
                   (BCH) codes;MDS codes;Reed-Muller
                   codes;bounds;nonbinary codes;puncturing;quantum
                   codes;self-orthogonal codes},
}
@unpublished{Gottesman-prime-power-2014,
  author = {Gottesman, Daniel},
  title  = {Stabilizer Codes with Prime Power Qudits},
  note   = {Invited talk at QEC 2014 (ETH Zurich)},
  url    = {https://www.qec14.ethz.ch/slides/DanielGottesman.pdf},
  year   = {2014},
  annote = {Quantum error correction of decoherence and faulty control
            operations forms the backbone of all of quantum information
            processing. Despite remarkable progress on this front since
            the discovery of quantum error correcting codes more than a
            decade ago, important open problems in both theory and
            applications to real physical systems remain.},
}
@article{Zeng-Pryadko-2018,
  author        = {Zeng, Weilei and Pryadko, Leonid P.},
  title         = {Higher-Dimensional Quantum Hypergraph-Product Codes
                   with Finite Rates},
  journal       = {Phys. Rev. Lett.},
  year          = {2019},
  month         = jun,
  volume        = {122},
  issue         = {23},
  pages         = {230501},
  numpages      = {6},
  publisher     = {American Physical Society},
  doi           = {10.1103/PhysRevLett.122.230501},
  eprint        = {1810.01519},
  archiveprefix = {arXiv},
  url           = {https://link.aps.org/doi/10.1103/PhysRevLett.122.230501},
}
@article{Zeng-Pryadko-hprod-2020,
  author        = {Zeng, Weilei and Pryadko, Leonid P.},
  title         = {Minimal Distances for Certain Quantum Product Codes and
                   Tensor Products of Chain Complexes},
  journal       = {Phys. Rev. A},
  year          = {2020},
  volume        = {102},
  issue         = {6},
  pages         = {062402},
  numpages      = {15},
  doi           = {10.1103/PhysRevA.102.062402},
  eprint        = {2007.12152},
  archiveprefix = {arXiv},
  url           = {https://link.aps.org/doi/10.1103/PhysRevA.102.062402},
  annote        = {We use a map to quantum error-correcting codes and a
                   subspace projection to get lower bounds for minimal
                   homological distances in a tensor product of two chain
                   complexes of vector spaces over a finite
                   field. Homology groups of such a complex are described
                   by the K{\"u}nneth theorem. We give an explicit
                   expression for the distances when one of the complexes
                   is a linear map between two spaces. The codes in the
                   construction, subsystem product codes and their
                   gauge-fixed variants, generalize several known families
                   of quantum error-correcting codes.},
}
@article{Leon-1988,
  author   = {Leon, J. S.},
  title    = {A Probabilistic Algorithm for Computing Minimum Weights of
              Large Error-Correcting Codes},
  journal  = {IEEE Trans. Inf. Theory},
  year     = {1988},
  month    = sep,
  volume   = {34},
  number   = {5},
  pages    = {1354--1359},
  doi      = {10.1109/18.21270},
  issn     = {0018-9448},
  abstract = {An algorithm is developed that can be used to find, with a
              very low probability of error ($10^{-100}$ or less in many
              cases), the minimum weights of codes far too large to be
              treated by any known exact algorithm. The probabilistic
              method is used to find minimum weights of all extended
              quadratic residue codes of length 440 or less. The
              probabilistic algorithm is presented for binary codes, but
              it can be generalized to codes over GF(q) with $q > 2$.},
  keywords = {Binary codes;Computer errors;Computer science;Error
              correction codes;Linear code;Mathematics;National
              security;Statistics;error correction
              codes;probability;binary codes;error-correcting
              codes;minimum weights;probabilistic algorithm;quadratic
              residue codes;},
}
@article{Kruk-1989,
  author  = {Kruk, E. A.},
  title   = {Decoding Complexity Bound for Linear Block Codes},
  journal = {Probl. Peredachi Inf.},
  year    = {1989},
  volume  = {25},
  number  = {3},
  pages   = {103--107},
  note    = {(In Russian)},
  url     = {http://mi.mathnet.ru/eng/ppi665},
  annote  = {A new complexity bound is derived for maximum-likelihood
             decoding of linear block codes in a memoryless q-ary
             symmetric channel. The bound is the best among all known
             bounds in the entire range of code rates.},
}
@article{Coffey-Goodman-1990,
  author   = {Coffey, J. T. and Goodman, R. M.},
  title    = {The Complexity of Information Set Decoding},
  journal  = {IEEE Trans. Inf. Theory},
  year     = {1990},
  month    = sep,
  volume   = {36},
  number   = {5},
  pages    = {1031--1037},
  doi      = {10.1109/18.57202},
  issn     = {0018-9448},
  abstract = {Information set decoding is an algorithm for decoding any
              linear code. Expressions for the complexity of the
              procedure that are logarithmically exact for virtually all
              codes are presented. The expressions cover the cases of
              complete minimum distance decoding and bounded
              hard-decision decoding, as well as the important case of
              bounded soft-decision decoding. It is demonstrated that
              these results are vastly better than those for the trivial
              algorithms of searching through all codewords or through
              all syndromes, and are significantly better than those for
              any other general algorithm currently known. For codes over
              large symbol fields, the procedure tends towards a
              complexity that is subexponential in the symbol size.},
  keywords = {Decoding;Error correction;Helium;Linear
              code;Polynomials;Redundancy;Vectors;decoding;bounded
              hard-decision decoding;bounded soft-decision
              decoding;complete minimum distance
              decoding;complexity;information set decoding;linear code;},
}
@article{Cuellar-etal-2020,
  author        = {Cu{\'e}llar, M. P. and G{\'o}mez-Torrecillas, J. and
                   Lobillo, F. J. and Navarro, G.},
  title         = {Genetic Algorithms with Permutation-Based
                   Representation for Computing the Distance of Linear
                   Codes},
  journal       = {Swarm and Evolutionary Computation},
  year          = {2021},
  volume        = {60},
  pages         = {100797},
  doi           = {10.1016/j.swevo.2020.100797},
  eprint        = {2002.12330},
  archiveprefix = {arXiv},
  annote        = {Finding the minimum distance of linear codes is an
                   NP-hard problem. Traditionally, this computation has
                   been addressed by means of the design of algorithms
                   that find, by a clever exhaustive search, a linear
                   combination of some generating matrix rows that
                   provides a codeword with minimum weight. Therefore, as
                   the dimension of the code or the size of the underlying
                   finite field increase, so it does exponentially the run
                   time. In this work, we prove that, given a generating
                   matrix, there exists a column permutation which leads
                   to a reduced row echelon form containing a row whose
                   weight is the code distance. This result enables the
                   use of permutations as representation scheme, in
                   contrast to the usual discrete representation, which
                   makes the search of the optimum polynomial time
                   dependent from the base field. In particular, we have
                   implemented genetic and CHC algorithms using this
                   representation as a proof of concept. Experimental
                   results have been carried out employing codes over
                   fields with two and eight elements, which suggests that
                   evolutionary algorithms with our proposed permutation
                   encoding are competitive with regard to existing
                   methods in the literature. As a by-product, we have
                   found and amended some inaccuracies in the MAGMA
                   Computational Algebra System concerning the stored
                   distances of some linear codes.},
}
@article{Dumer-Kovalev-Pryadko-bnd-2015,
  author        = {Dumer, I. and Kovalev, A. A. and Pryadko, L. P.},
  title         = {Thresholds for Correcting Errors, Erasures, and Faulty
                   Syndrome Measurements in Degenerate Quantum Codes},
  journal       = {Phys. Rev. Lett.},
  year          = {2015},
  volume        = {115},
  pages         = {050502},
  numpages      = {5},
  publisher     = {American Physical Society},
  doi           = {10.1103/PhysRevLett.115.050502},
  eprint        = {1412.6172},
  archiveprefix = {arXiv},
  url           = {https://doi.org/10.1103/PhysRevLett.115.050502},
  annote        = {We suggest a technique for constructing lower
                   (existence) bounds for the fault-tolerant threshold to
                   scalable quantum computation applicable to degenerate
                   quantum codes with sublinear distance scaling. We give
                   explicit analytic expressions combining probabilities
                   of erasures, depolarizing errors, and phenomenological
                   syndrome measurement errors for quantum LDPC codes with
                   logarithmic or larger distances. These threshold
                   estimates are parametrically better than the existing
                   analytical bound based on percolation.},
}
@article{Dumer-Kovalev-Pryadko-IEEE-2017,
  author   = {Dumer, I. and Kovalev, A. A. and Pryadko, L. P.},
  title    = {Distance Verification for Classical and Quantum {LDPC}
              Codes},
  journal  = {IEEE Trans. Inf. Theory},
  year     = {2017},
  month    = jul,
  volume   = {63},
  number   = {7},
  pages    = {4675--4686},
  doi      = {10.1109/TIT.2017.2690381},
  issn     = {0018-9448},
  abstract = {The techniques of distance verification known for general
              linear codes are first applied to the quantum stabilizer
              codes. Then, these techniques are considered for classical
              and quantum (stabilizer) low-density-parity-check (LDPC)
              codes. New complexity bounds for distance verification with
              provable performance are derived using the average weight
              spectra of the ensembles of LDPC codes. These bounds are
              expressed in terms of the erasure-correcting capacity of
              the corresponding ensemble. We also present a new
              irreducible-cluster technique that can be applied to any
              LDPC code and takes advantage of parity-checks' sparsity
              for both the classical and quantum LDPC codes. This
              technique reduces complexity exponents of all existing
              deterministic techniques designed for generic stabilizer
              codes with small relative distances, which also include all
              known families of the quantum stabilizer LDPC codes.},
  keywords = {parity check codes;classical LDPC codes;complexity
              exponents;distance verification;erasure-correcting
              capacity;general linear codes;generic stabilizer
              codes;irreducible-cluster technique;low-density parity
              check codes;quantum LDPC codes;quantum stabilizer
              codes;Algorithm design and analysis;Complexity
              theory;Decoding;Generators;Linear codes;Parity check
              codes;Quantum computing;Distance verification;LDPC
              codes;erasure correction;list decoding;quantum stabilizer
              codes},
}
@techreport{Steel-1953,
  author      = {Steel, R. G. D.},
  title       = {Relation Between {P}oisson and Multinomial
                 Distributions},
  institution = {Cornell University},
  year        = {1953},
  type        = {Biometrics Unit Technical Reports},
  number      = {BU-39-M},
  url         = {https://ecommons.cornell.edu/handle/1813/32480},
  annote      = {Relation between k trials on independent Poisson
                 populations with different means and the fixed total n,
                 and a multinomial distribution with k outcomes.},
}
@article{Chernoff-Lehmann-1954,
  author    = {Chernoff, Herman and Lehmann, E. L.},
  title     = {The Use of Maximum Likelihood Estimates in {$\chi^2$}
               Tests for Goodness of Fit},
  journal   = {The Annals of Mathematical Statistics},
  year      = {1954},
  volume    = {25},
  number    = {3},
  pages     = {579--586},
  publisher = {Institute of Mathematical Statistics},
  doi       = {10.1214/aoms/1177728726},
  url       = {https://doi.org/10.1214/aoms/1177728726},
  abstract  = {The usual test that a sample comes from a distribution of
               given form is performed by counting the number of
               observations falling into specified cells and applying the
               $\chi^2$ test to these frequencies. In estimating the
               parameters for this test, one may use the maximum
               likelihood (or equivalent) estimate based (1) on the cell
               frequencies, or (2) on the original observations. This
               paper shows that in (2), unlike the well known result for
               (1), the test statistic does not have a limiting
               $\chi^2$-distribution, but that it is stochastically
               larger than would be expected under the $\chi^2$
               theory. The limiting distribution is obtained and some
               examples are computed. These indicate that the error is
               not serious in the case of fitting a Poisson distribution,
               but may be so for the fitting of a normal.},
}
@book{Cramer-book-1999,
  author    = {Cram{\'e}r, Harald},
  title     = {Mathematical Methods of Statistics ({PMS}-9)},
  publisher = {Princeton University Press},
  year      = {1999},
  isbn      = {9780691005478},
  url       = {https://www.jstor.org/stable/j.ctt1bpm9r4},
  abstract  = {In this classic of statistical mathematical theory, Harald
               Cram{\'e}r joins the two major lines of development in the
               field: while British and American statisticians were
               developing the science of statistical inference, French
               and Russian probabilists transformed the classical
               calculus of probability into a rigorous and pure
               mathematical theory. The result of Cram{\'e}r's work is a
               masterly exposition of the mathematical methods of modern
               statistics that set the standard that others have since
               sought to follow. For anyone with a working knowledge of
               undergraduate mathematics the book is self contained. The
               first part is an introduction to the fundamental concept
               of a distribution and of integration with respect to a
               distribution. The second part contains the general theory
               of random variables and probability distributions while
               the third is devoted to the theory of sampling,
               statistical estimation, and tests of significance.},
}
@misc{ConwayPol-2022,
  author = {L{\"u}beck, Frank},
  title  = {Conway Polynomials for Finite Fields},
  url    = {https://www.math.rwth-aachen.de/~Frank.Luebeck/data/ConwayPol/},
  note   = {[Downloaded on 2022-02-19]},
  year   = {2021},
  annote = {Extensive list of Conway polynomials used to define extension
            Galois fields.},
}