Current File : /usr/lib64/python2.7/site-packages/numpy/linalg/linalg.pyo

"""Lite version of scipy.linalg.

Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains a high-level Python interface to the LAPACK library.  The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
tmatrix_powertsolvettensorsolvet	tensorinvtinvtcholeskyteigvalsteigvalshtpinvtslogdettdettsvdteigteightlstsqtnormtqrtcondtmatrix_ranktLinAlgErrori����(!tarraytasarraytzerostemptyt	transposetintctsingletdoubletcsingletcdoubletinexacttcomplexfloatingtnewaxistraveltalltInftdottaddtmultiplytidentitytsqrttmaximumtflatnonzerotdiagonaltarangetfastCopyAndTransposetsumtisfinitetsizetfinfotabsolutetlogtexp(ttriu(tlapack_lite(R(tasbytestNtVtAtStLcBseZdZRS(s
    Generic Python-exception-derived object raised by linalg functions.

    General purpose exception class, derived from Python's exception.Exception
    class, programmatically raised in linalg functions when a Linear
    Algebra-related condition would prevent further correct execution of the
    function.

    Parameters
    ----------
    None

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> LA.inv(np.zeros((2,2)))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "...linalg.py", line 350,
        in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
      File "...linalg.py", line 249,
        in solve
        raise LinAlgError('Singular matrix')
    numpy.linalg.LinAlgError: Singular matrix

    """


# Internal helpers: _makearray, isComplexType, _realType, _complexType,
# _linalgRealType, _commonType, _to_native_byte_order, _fastCopyAndTranspose,
# _assertRank2, _assertSquareness, _assertFinite, _assertNonEmpty.


def tensorsolve(a, b, axes=None):
    """
    Solve the tensor equation ``a x = b`` for x.

    It is assumed that all indices of `x` are summed over in the product,
    together with the rightmost indices of `a`, as is done in, for example,
    ``tensordot(a, x, axes=len(b.shape))``.

    Parameters
    ----------
    a : array_like
        Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
        the shape of that sub-tensor of `a` consisting of the appropriate
        number of its rightmost indices, and must be such that
        ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
        'square').
    b : array_like
        Right-hand tensor, which can be of any shape.
    axes : tuple of ints, optional
        Axes in `a` to reorder to the right, before inversion.
        If None (default), no reordering is done.

    Returns
    -------
    x : ndarray, shape Q

    Raises
    ------
    LinAlgError
        If `a` is singular or not 'square' (in the above sense).

    See Also
    --------
    tensordot, tensorinv, einsum

    Examples
    --------
    >>> a = np.eye(2*3*4)
    >>> a.shape = (2*3, 4, 2, 3, 4)
    >>> b = np.random.randn(2*3, 4)
    >>> x = np.linalg.tensorsolve(a, b)
    >>> x.shape
    (2, 3, 4)
    >>> np.allclose(np.tensordot(a, x, axes=3), b)
    True

    """


def solve(a, b):
    """
    Solve a linear matrix equation, or system of linear scalar equations.

    Computes the "exact" solution, `x`, of the well-determined, i.e., full
    rank, linear matrix equation `ax = b`.

    Parameters
    ----------
    a : (M, M) array_like
        Coefficient matrix.
    b : {(M,), (M, N)}, array_like
        Ordinate or "dependent variable" values.

    Returns
    -------
    x : {(M,), (M, N)} ndarray
        Solution to the system a x = b.  Returned shape is identical to `b`.

    Raises
    ------
    LinAlgError
        If `a` is singular or not square.

    Notes
    -----
    `solve` is a wrapper for the LAPACK routines `dgesv`_ and
    `zgesv`_, the former being used if `a` is real-valued, the latter if
    it is complex-valued.  The solution to the system of linear equations
    is computed using an LU decomposition [1]_ with partial pivoting and
    row interchanges.

    .. _dgesv: http://www.netlib.org/lapack/double/dgesv.f

    .. _zgesv: http://www.netlib.org/lapack/complex16/zgesv.f

    `a` must be square and of full-rank, i.e., all rows (or, equivalently,
    columns) must be linearly independent; if either is not true, use
    `lstsq` for the least-squares best "solution" of the
    system/equation.

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pg. 22.

    Examples
    --------
    Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:

    >>> a = np.array([[3,1], [1,2]])
    >>> b = np.array([9,8])
    >>> x = np.linalg.solve(a, b)
    >>> x
    array([ 2.,  3.])

    Check that the solution is correct:

    >>> (np.dot(a, x) == b).all()
    True

    """


def tensorinv(a, ind=2):
    """
    Compute the 'inverse' of an N-dimensional array.

    The result is an inverse for `a` relative to the tensordot operation
    ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
    ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
    tensordot operation.

    Parameters
    ----------
    a : array_like
        Tensor to 'invert'. Its shape must be 'square', i. e.,
        ``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
    ind : int, optional
        Number of first indices that are involved in the inverse sum.
        Must be a positive integer, default is 2.

    Returns
    -------
    b : ndarray
        `a`'s tensordot inverse, shape ``a.shape[:ind] + a.shape[ind:]``.

    Raises
    ------
    LinAlgError
        If `a` is singular or not 'square' (in the above sense).

    See Also
    --------
    tensordot, tensorsolve

    Examples
    --------
    >>> a = np.eye(4*6)
    >>> a.shape = (4, 6, 8, 3)
    >>> ainv = np.linalg.tensorinv(a, ind=2)
    >>> ainv.shape
    (8, 3, 4, 6)
    >>> b = np.random.randn(4, 6)
    >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
    True

    >>> a = np.eye(4*6)
    >>> a.shape = (24, 8, 3)
    >>> ainv = np.linalg.tensorinv(a, ind=1)
    >>> ainv.shape
    (8, 3, 24)
    >>> b = np.random.randn(24)
    >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
    True

    """


def inv(a):
    """
    Compute the (multiplicative) inverse of a matrix.

    Given a square matrix `a`, return the matrix `ainv` satisfying
    ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.

    Parameters
    ----------
    a : (M, M) array_like
        Matrix to be inverted.

    Returns
    -------
    ainv : (M, M) ndarray or matrix
        (Multiplicative) inverse of the matrix `a`.

    Raises
    ------
    LinAlgError
        If `a` is singular or not square.

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> ainv = LA.inv(a)
    >>> np.allclose(np.dot(a, ainv), np.eye(2))
    True
    >>> np.allclose(np.dot(ainv, a), np.eye(2))
    True

    If a is a matrix object, then the return value is a matrix as well:

    >>> ainv = LA.inv(np.matrix(a))
    >>> ainv
    matrix([[-2. ,  1. ],
            [ 1.5, -0.5]])

    """


def cholesky(a):
    """
    Cholesky decomposition.

    Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
    where `L` is lower-triangular and .H is the conjugate transpose operator
    (which is the ordinary transpose if `a` is real-valued).  `a` must be
    Hermitian (symmetric if real-valued) and positive-definite.  Only `L` is
    actually returned.

    Parameters
    ----------
    a : (M, M) array_like
        Hermitian (symmetric if all elements are real), positive-definite
        input matrix.

    Returns
    -------
    L : {(M, M) ndarray, (M, M) matrix}
        Lower-triangular Cholesky factor of `a`.  Returns a matrix object
        if `a` is a matrix object.

    Raises
    ------
    LinAlgError
       If the decomposition fails, for example, if `a` is not
       positive-definite.

    Notes
    -----
    The Cholesky decomposition is often used as a fast way of solving

    .. math:: A \mathbf{x} = \mathbf{b}

    (when `A` is both Hermitian/symmetric and positive-definite).

    First, we solve for :math:`\mathbf{y}` in

    .. math:: L \mathbf{y} = \mathbf{b},

    and then for :math:`\mathbf{x}` in

    .. math:: L.H \mathbf{x} = \mathbf{y}.
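
    As a small illustrative sketch (not part of the original docstring), the
    two-step solve can be written with ``np.linalg.solve``, which works here
    but does not exploit the triangular structure of `L`:

    >>> import numpy as np
    >>> A = np.array([[4., 2.], [2., 3.]])   # symmetric positive-definite
    >>> b = np.array([6., 5.])
    >>> L = np.linalg.cholesky(A)
    >>> y = np.linalg.solve(L, b)            # forward step: L y = b
    >>> x = np.linalg.solve(L.T.conj(), y)   # back step: L.H x = y
    >>> np.allclose(np.dot(A, x), b)
    True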

    Examples
    --------
    >>> A = np.array([[1,-2j],[2j,5]])
    >>> A
    array([[ 1.+0.j,  0.-2.j],
           [ 0.+2.j,  5.+0.j]])
    >>> L = np.linalg.cholesky(A)
    >>> L
    array([[ 1.+0.j,  0.+0.j],
           [ 0.+2.j,  1.+0.j]])
    >>> np.dot(L, L.T.conj()) # verify that L * L.H = A
    array([[ 1.+0.j,  0.-2.j],
           [ 0.+2.j,  5.+0.j]])
    >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
    >>> np.linalg.cholesky(A) # an ndarray object is returned
    array([[ 1.+0.j,  0.+0.j],
           [ 0.+2.j,  1.+0.j]])
    >>> # But a matrix object is returned if A is a matrix object
    >>> LA.cholesky(np.matrix(A))
    matrix([[ 1.+0.j,  0.+0.j],
            [ 0.+2.j,  1.+0.j]])

    """


def qr(a, mode='full'):
    """
    Compute the qr factorization of a matrix.

    Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
    upper-triangular.

    Parameters
    ----------
    a : array_like
        Matrix to be factored, of shape (M, N).
    mode : {'full', 'r', 'economic'}, optional
        Specifies the values to be returned. 'full' is the default.
        Economic mode is slightly faster than 'r' mode if only `r` is needed.

    Returns
    -------
    q : ndarray of float or complex, optional
        The orthonormal matrix, of shape (M, K). Only returned if
        ``mode='full'``.
    r : ndarray of float or complex, optional
        The upper-triangular matrix, of shape (K, N) with K = min(M, N).
        Only returned when ``mode='full'`` or ``mode='r'``.
    a2 : ndarray of float or complex, optional
        Array of shape (M, N), only returned when ``mode='economic'``.
        The diagonal and the upper triangle of `a2` contains `r`, while
        the rest of the matrix is undefined.

    Raises
    ------
    LinAlgError
        If factoring fails.

    Notes
    -----
    This is an interface to the LAPACK routines dgeqrf, zgeqrf,
    dorgqr, and zungqr.

    For more information on the qr factorization, see for example:
    http://en.wikipedia.org/wiki/QR_factorization

    Subclasses of `ndarray` are preserved, so if `a` is of type `matrix`,
    all the return values will be matrices too.

    Examples
    --------
    >>> a = np.random.randn(9, 6)
    >>> q, r = np.linalg.qr(a)
    >>> np.allclose(a, np.dot(q, r))  # a does equal qr
    True
    >>> r2 = np.linalg.qr(a, mode='r')
    >>> r3 = np.linalg.qr(a, mode='economic')
    >>> np.allclose(r, r2)  # mode='r' returns the same r as mode='full'
    True
    >>> # But only triu parts are guaranteed equal when mode='economic'
    >>> np.allclose(r, np.triu(r3[:6,:6], k=0))
    True

    Example illustrating a common use of `qr`: solving of least squares
    problems

    What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
    the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
    and you'll see that it should be y0 = 0, m = 1.)  The answer is provided
    by solving the over-determined matrix equation ``Ax = b``, where::

      A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
      x = array([[y0], [m]])
      b = array([[1], [0], [2], [1]])

    If A = qr such that q is orthonormal (which is always possible via
    Gram-Schmidt), then ``x = inv(r) * (q.T) * b``.  (In numpy practice,
    however, we simply use `lstsq`.)

    >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
    >>> A
    array([[0, 1],
           [1, 1],
           [1, 1],
           [2, 1]])
    >>> b = np.array([1, 0, 2, 1])
    >>> q, r = LA.qr(A)
    >>> p = np.dot(q.T, b)
    >>> np.dot(LA.inv(r), p)
    array([  1.1e-16,   1.0e+00])

    """


def eigvals(a):
    """
    Compute the eigenvalues of a general matrix.

    Main difference between `eigvals` and `eig`: the eigenvectors aren't
    returned.

    Parameters
    ----------
    a : (M, M) array_like
        A complex- or real-valued matrix whose eigenvalues will be computed.

    Returns
    -------
    w : (M,) ndarray
        The eigenvalues, each repeated according to its multiplicity.
        They are not necessarily ordered, nor are they necessarily
        real for real matrices.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eig : eigenvalues and right eigenvectors of general arrays
    eigvalsh : eigenvalues of symmetric or Hermitian arrays.
    eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.

    Notes
    -----
    This is a simple interface to the LAPACK routines dgeev and zgeev
    that sets those routines' flags to return only the eigenvalues of
    general real and complex arrays, respectively.

    Examples
    --------
    Illustration, using the fact that the eigenvalues of a diagonal matrix
    are its diagonal elements, that multiplying a matrix on the left
    by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
    of `Q`), preserves the eigenvalues of the "middle" matrix.  In other words,
    if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
    ``A``:

    >>> from numpy import linalg as LA
    >>> x = np.random.random()
    >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
    >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
    (1.0, 1.0, 0.0)

    Now multiply a diagonal matrix by Q on one side and by Q.T on the other:

    >>> D = np.diag((-1,1))
    >>> LA.eigvals(D)
    array([-1.,  1.])
    >>> A = np.dot(Q, D)
    >>> A = np.dot(A, Q.T)
    >>> LA.eigvals(A)
    array([ 1., -1.])

    """


def eigvalsh(a, UPLO='L'):
    """
    Compute the eigenvalues of a Hermitian or real symmetric matrix.

    Main difference from eigh: the eigenvectors are not computed.

    Parameters
    ----------
    a : (M, M) array_like
        A complex- or real-valued matrix whose eigenvalues are to be
        computed.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').

    Returns
    -------
    w : (M,) ndarray
        The eigenvalues, not necessarily ordered, each repeated according to
        its multiplicity.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
    eigvals : eigenvalues of general real or complex arrays.
    eig : eigenvalues and right eigenvectors of general real or complex
          arrays.

    Notes
    -----
    This is a simple interface to the LAPACK routines dsyevd and zheevd
    that sets those routines' flags to return only the eigenvalues of
    real symmetric and complex Hermitian arrays, respectively.

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, -2j], [2j, 5]])
    >>> LA.eigvalsh(a)
    array([ 0.17157288+0.j,  5.82842712+0.j])

    """


def eig(a):
    """
    Compute the eigenvalues and right eigenvectors of a square array.

    Parameters
    ----------
    a : (M, M) array_like
        A square array of real or complex elements.

    Returns
    -------
    w : (M,) ndarray
        The eigenvalues, each repeated according to its multiplicity.
        The eigenvalues are not necessarily ordered, nor are they
        necessarily real for real arrays (though for real arrays
        complex-valued eigenvalues should occur in conjugate pairs).
    v : (M, M) ndarray
        The normalized (unit "length") eigenvectors, such that the
        column ``v[:,i]`` is the eigenvector corresponding to the
        eigenvalue ``w[i]``.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
       array.

    eigvals : eigenvalues of a non-symmetric array.

    Notes
    -----
    This is a simple interface to the LAPACK routines dgeev and zgeev
    which compute the eigenvalues and eigenvectors of, respectively,
    general real- and complex-valued square arrays.

    The number `w` is an eigenvalue of `a` if there exists a vector
    `v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
    `v` satisfy the equations ``dot(a, v[:,i]) = w[i] * v[:,i]``
    for :math:`i \in \{0,...,M-1\}`.
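
    A compact numerical check of this relation (an added illustration, not
    part of the original docstring), assuming NumPy is imported as ``np``:

    >>> import numpy as np
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> w, v = np.linalg.eig(a)
    >>> np.allclose(np.dot(a, v), v * w)   # each column of v scaled by its eigenvalue
    True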

    The array `v` of eigenvectors may not be of maximum rank, that is, some
    of the columns may be linearly dependent, although round-off error may
    obscure that fact. If the eigenvalues are all different, then theoretically
    the eigenvectors are linearly independent. Likewise, the (complex-valued)
    matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
    if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
    transpose of `a`.

    Finally, it is emphasized that `v` consists of the *right* (as in
    right-hand side) eigenvectors of `a`.  A vector `y` satisfying
    ``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
    eigenvector of `a`, and, in general, the left and right eigenvectors
    of a matrix are not necessarily the (perhaps conjugate) transposes
    of each other.

    References
    ----------
    G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
    Academic Press, Inc., 1980, Various pp.

    Examples
    --------
    >>> from numpy import linalg as LA

    (Almost) trivial example with real e-values and e-vectors.

    >>> w, v = LA.eig(np.diag((1, 2, 3)))
    >>> w; v
    array([ 1.,  2.,  3.])
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])

    Real matrix possessing complex e-values and e-vectors; note that the
    e-values are complex conjugates of each other.

    >>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
    >>> w; v
    array([ 1. + 1.j,  1. - 1.j])
    array([[ 0.70710678+0.j        ,  0.70710678+0.j        ],
           [ 0.00000000-0.70710678j,  0.00000000+0.70710678j]])

    Complex-valued matrix with real e-values (but complex-valued e-vectors);
    note that a.conj().T = a, i.e., a is Hermitian.

    >>> a = np.array([[1, 1j], [-1j, 1]])
    >>> w, v = LA.eig(a)
    >>> w; v
    array([  2.00000000e+00+0.j,   5.98651912e-36+0.j]) # i.e., {2, 0}
    array([[ 0.00000000+0.70710678j,  0.70710678+0.j        ],
           [ 0.70710678+0.j        ,  0.00000000+0.70710678j]])

    Be careful about round-off error!

    >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
    >>> # Theor. e-values are 1 +/- 1e-9
    >>> w, v = LA.eig(a)
    >>> w; v
    array([ 1.,  1.])
    array([[ 1.,  0.],
           [ 0.,  1.]])

    """


def eigh(a, UPLO='L'):
    """
    Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.

    Returns two objects, a 1-D array containing the eigenvalues of `a`, and
    a 2-D square array or matrix (depending on the input type) of the
    corresponding eigenvectors (in columns).

    Parameters
    ----------
    a : (M, M) array_like
        A complex Hermitian or real symmetric matrix.
    UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').

    Returns
    -------
    w : (M,) ndarray
        The eigenvalues, not necessarily ordered.
    v : {(M, M) ndarray, (M, M) matrix}
        The column ``v[:, i]`` is the normalized eigenvector corresponding
        to the eigenvalue ``w[i]``.  Will return a matrix object if `a` is
        a matrix object.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvalsh : eigenvalues of symmetric or Hermitian arrays.
    eig : eigenvalues and right eigenvectors for non-symmetric arrays.
    eigvals : eigenvalues of non-symmetric arrays.

    Notes
    -----
    This is a simple interface to the LAPACK routines dsyevd and zheevd,
    which compute the eigenvalues and eigenvectors of real symmetric and
    complex Hermitian arrays, respectively.

    The eigenvalues of real symmetric or complex Hermitian matrices are
    always real. [1]_ The array `v` of (column) eigenvectors is unitary
    and `a`, `w`, and `v` satisfy the equations
    ``dot(a, v[:, i]) = w[i] * v[:, i]``.

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pg. 222.

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, -2j], [2j, 5]])
    >>> a
    array([[ 1.+0.j,  0.-2.j],
           [ 0.+2.j,  5.+0.j]])
    >>> w, v = LA.eigh(a)
    >>> w; v
    array([ 0.17157288,  5.82842712])
    array([[-0.92387953+0.j        , -0.38268343+0.j        ],
           [ 0.00000000+0.38268343j,  0.00000000-0.92387953j]])

    >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
    array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
    >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
    array([ 0.+0.j,  0.+0.j])

    >>> A = np.matrix(a) # what happens if input is a matrix object
    >>> A
    matrix([[ 1.+0.j,  0.-2.j],
            [ 0.+2.j,  5.+0.j]])
    >>> w, v = LA.eigh(A)
    >>> w; v
    array([ 0.17157288,  5.82842712])
    matrix([[-0.92387953+0.j        , -0.38268343+0.j        ],
            [ 0.00000000+0.38268343j,  0.00000000-0.92387953j]])

    """


def svd(a, full_matrices=1, compute_uv=1):
    """
    Singular Value Decomposition.

    Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
    are unitary and `s` is a 1-d array of `a`'s singular values.

    Parameters
    ----------
    a : array_like
        A real or complex matrix of shape (`M`, `N`) .
    full_matrices : bool, optional
        If True (default), `u` and `v` have the shapes (`M`, `M`) and
        (`N`, `N`), respectively.  Otherwise, the shapes are (`M`, `K`)
        and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
    compute_uv : bool, optional
        Whether or not to compute `u` and `v` in addition to `s`.  True
        by default.

    Returns
    -------
    u : ndarray
        Unitary matrix.  The shape of `u` is (`M`, `M`) or (`M`, `K`)
        depending on value of ``full_matrices``.
    s : ndarray
        The singular values, sorted so that ``s[i] >= s[i+1]``.  `s` is
        a 1-d array of length min(`M`, `N`).
    v : ndarray
        Unitary matrix of shape (`N`, `N`) or (`K`, `N`), depending on
        ``full_matrices``.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    Notes
    -----
    The SVD is commonly written as ``a = U S V.H``.  The `v` returned
    by this function is ``V.H`` and ``u = U``.

    If ``U`` is a unitary matrix, it means that it
    satisfies ``U.H = inv(U)``.

    The rows of `v` are the eigenvectors of ``a.H a``. The columns
    of `u` are the eigenvectors of ``a a.H``.  For row ``i`` in
    `v` and column ``i`` in `u`, the corresponding eigenvalue is
    ``s[i]**2``.
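
    A rough numerical check of this statement (an added illustration, not
    part of the original docstring), assuming NumPy is imported as ``np``:

    >>> import numpy as np
    >>> a = np.random.randn(9, 6)
    >>> s = np.linalg.svd(a, compute_uv=False)
    >>> w = np.linalg.eigvalsh(np.dot(a.conj().T, a))
    >>> np.allclose(sorted(s**2), sorted(w))
    True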

    If `a` is a `matrix` object (as opposed to an `ndarray`), then so
    are all the return values.

    Examples
    --------
    >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)

    Reconstruction based on full SVD:

    >>> U, s, V = np.linalg.svd(a, full_matrices=True)
    >>> U.shape, V.shape, s.shape
    ((9, 6), (6, 6), (6,))
    >>> S = np.zeros((9, 6), dtype=complex)
    >>> S[:6, :6] = np.diag(s)
    >>> np.allclose(a, np.dot(U, np.dot(S, V)))
    True

    Reconstruction based on reduced SVD:

    >>> U, s, V = np.linalg.svd(a, full_matrices=False)
    >>> U.shape, V.shape, s.shape
    ((9, 6), (6, 6), (6,))
    >>> S = np.diag(s)
    >>> np.allclose(a, np.dot(U, np.dot(S, V)))
    True

    """


def cond(x, p=None):
    """
    Compute the condition number of a matrix.

    This function is capable of returning the condition number using
    one of seven different norms, depending on the value of `p` (see
    Parameters below).

    Parameters
    ----------
    x : (M, N) array_like
        The matrix whose condition number is sought.
    p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
        Order of the norm:

        =====  ============================
        p      norm for matrices
        =====  ============================
        None   2-norm, computed directly using the ``SVD``
        'fro'  Frobenius norm
        inf    max(sum(abs(x), axis=1))
        -inf   min(sum(abs(x), axis=1))
        1      max(sum(abs(x), axis=0))
        -1     min(sum(abs(x), axis=0))
        2      2-norm (largest sing. value)
        -2     smallest singular value
        =====  ============================

        inf means the numpy.inf object, and the Frobenius norm is
        the root-of-sum-of-squares norm.

    Returns
    -------
    c : {float, inf}
        The condition number of the matrix. May be infinite.

    See Also
    --------
    numpy.linalg.norm

    Notes
    -----
    The condition number of `x` is defined as the norm of `x` times the
    norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
    (root-of-sum-of-squares) or one of a number of other matrix norms.
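
    A quick numerical check of this definition using the 1-norm (an added
    illustration, not part of the original docstring; assumes NumPy is
    imported as ``np``):

    >>> import numpy as np
    >>> a = np.array([[1., 0., -1.], [0., 1., 0.], [1., 0., 1.]])
    >>> np.allclose(np.linalg.cond(a, 1),
    ...             np.linalg.norm(a, 1) * np.linalg.norm(np.linalg.inv(a), 1))
    True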

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
           Academic Press, Inc., 1980, pg. 285.

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
    >>> a
    array([[ 1,  0, -1],
           [ 0,  1,  0],
           [ 1,  0,  1]])
    >>> LA.cond(a)
    1.4142135623730951
    >>> LA.cond(a, 'fro')
    3.1622776601683795
    >>> LA.cond(a, np.inf)
    2.0
    >>> LA.cond(a, -np.inf)
    1.0
    >>> LA.cond(a, 1)
    2.0
    >>> LA.cond(a, -1)
    1.0
    >>> LA.cond(a, 2)
    1.4142135623730951
    >>> LA.cond(a, -2)
    0.70710678118654746
    >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
    0.70710678118654746

    """


def matrix_rank(M, tol=None):
    """
    Return matrix rank of array using SVD method

    Rank of the array is the number of SVD singular values of the array that are
    greater than `tol`.

    Parameters
    ----------
    M : {(M,), (M, N)} array_like
        array of <=2 dimensions
    tol : {None, float}, optional
       threshold below which SVD values are considered zero. If `tol` is
       None, and ``S`` is an array with singular values for `M`, and
       ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
       set to ``S.max() * max(M.shape) * eps``.

    Notes
    -----
    The default threshold to detect rank deficiency is a test on the magnitude
    of the singular values of `M`.  By default, we identify singular values less
    than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
    the symbols defined above). This is the algorithm MATLAB uses [1].  It also
    appears in *Numerical recipes* in the discussion of SVD solutions for linear
    least squares [2].
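
    As an added illustration (not part of the original text), the default
    threshold can be reproduced directly from the singular values; this
    assumes NumPy is imported as ``np``:

    >>> import numpy as np
    >>> I = np.eye(4); I[-1, -1] = 0.   # a rank-deficient matrix
    >>> S = np.linalg.svd(I, compute_uv=False)
    >>> tol = S.max() * max(I.shape) * np.finfo(S.dtype).eps
    >>> int(np.sum(S > tol)) == np.linalg.matrix_rank(I)
    True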

    This default threshold is designed to detect rank deficiency accounting for
    the numerical errors of the SVD computation.  Imagine that there is a column
    in `M` that is an exact (in floating point) linear combination of other
    columns in `M`. Computing the SVD on `M` will not produce a singular value
    exactly equal to 0 in general: any difference of the smallest SVD value from
    0 will be caused by numerical imprecision in the calculation of the SVD.
    Our threshold for small SVD values takes this numerical imprecision into
    account, and the default threshold will detect such numerical rank
    deficiency.  The threshold may declare a matrix `M` rank deficient even if
    the linear combination of some columns of `M` is not exactly equal to
    another column of `M` but only numerically very close to another column of
    `M`.

    We chose our default threshold because it is in wide use.  Other thresholds
    are possible.  For example, elsewhere in the 2007 edition of *Numerical
    recipes* there is an alternative threshold of ``S.max() *
    np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
    this threshold as being based on "expected roundoff error" (p 71).

    The thresholds above deal with floating point roundoff error in the
    calculation of the SVD.  However, you may have more information about the
    sources of error in `M` that would make you consider other tolerance values
    to detect *effective* rank deficiency.  The most useful measure of the
    tolerance depends on the operations you intend to use on your matrix.  For
    example, if your data come from uncertain measurements with uncertainties
    greater than floating point epsilon, choosing a tolerance near that
    uncertainty may be preferable.  The tolerance may be absolute if the
    uncertainties are absolute rather than relative.

    References
    ----------
    .. [1] MATLAB reference documentation, "Rank"
           http://www.mathworks.com/help/techdoc/ref/rank.html
    .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
           "Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
           page 795.

    Examples
    --------
    >>> from numpy.linalg import matrix_rank
    >>> matrix_rank(np.eye(4)) # Full rank matrix
    4
    >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
    >>> matrix_rank(I)
    3
    >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
    1
    >>> matrix_rank(np.zeros((4,)))
    0

    """


def pinv(a, rcond=1e-15):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a matrix.
    Calculate the generalized inverse of a matrix using its
    singular-value decomposition (SVD) and including all
    *large* singular values.

    Parameters
    ----------
    a : (M, N) array_like
      Matrix to be pseudo-inverted.
    rcond : float
      Cutoff for small singular values.
      Singular values smaller (in modulus) than
      `rcond` * largest_singular_value (again, in modulus)
      are set to zero.

    Returns
    -------
    B : (N, M) ndarray
      The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
      is `B`.

    Raises
    ------
    LinAlgError
      If the SVD computation does not converge.

    Notes
    -----
    The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
    defined as: "the matrix that 'solves' [the least-squares problem]
    :math:`Ax = b`," i.e., if :math:`\bar{x}` is said solution, then
    :math:`A^+` is that matrix such that :math:`\bar{x} = A^+b`.

    It can be shown that if :math:`Q_1 \Sigma Q_2^T = A` is the singular
    value decomposition of A, then
    :math:`A^+ = Q_2 \Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
    orthogonal matrices, :math:`\Sigma` is a diagonal matrix consisting
    of A's so-called singular values, (followed, typically, by
    zeros), and then :math:`\Sigma^+` is simply the diagonal matrix
    consisting of the reciprocals of A's singular values
    (again, followed by zeros). [1]_

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pp. 139-142.

    Examples
    --------
    The following example checks that ``a * a+ * a == a`` and
    ``a+ * a * a+ == a+``:

    >>> a = np.random.randn(9, 6)
    >>> B = np.linalg.pinv(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
    True

    iig�?gN(
RFRpt	conjugateRRjR)treduceRrRmR$RR&R (RCtrcondRER�R�R�R�R�tcutoffR�R}((s9/usr/lib64/python2.7/site-packages/numpy/linalg/linalg.pyR�s?


4cCst|�}t|�t|�t|�\}}t||�}t|�}|jd}t|�rrtj	}n	tj
}t|ft�}||||||d�}|d}|dkr�t
d��n)|dkr�|d�t|�t�fSddtj|td|d�k�d}t|�}	t|	�}
|tj|	|
�9}t|
|
�tj|
d	d
�}||fS(sB
    Compute the sign and (natural) logarithm of the determinant of an array.

    If an array has a very small or very large determinant, than a call to
    `det` may overflow or underflow. This routine is more robust against such
    issues, because it computes the logarithm of the determinant rather than
    the determinant itself.

    Parameters
    ----------
    a : array_like
        Input array, has to be a square 2-D array.

    Returns
    -------
    sign : float or complex
        A number representing the sign of the determinant. For a real matrix,
        this is 1, 0, or -1. For a complex matrix, this is a complex number
        with absolute value 1 (i.e., it is on the unit circle), or else 0.
    logdet : float
        The natural log of the absolute value of the determinant.

    If the determinant is zero, then `sign` will be 0 and `logdet` will be
    -Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.

    See Also
    --------
    det

    Notes
    -----
    The determinant is computed via LU factorization using the LAPACK
    routine z/dgetrf.

    .. versionadded:: 1.6.0.

    Examples
    --------
    The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:

    >>> a = np.array([[1, 2], [3, 4]])
    >>> (sign, logdet) = np.linalg.slogdet(a)
    >>> (sign, logdet)
    (-1, 0.69314718055994529)
    >>> sign * np.exp(logdet)
    -2.0

    This routine succeeds where ordinary `det` does not:

    >>> np.linalg.det(np.eye(500) * 0.1)
    0.0
    >>> np.linalg.slogdet(np.eye(500) * 0.1)
    (1, -1151.2925464970228)

    iR~s Illegal input to Fortran routinegg�?g@iitaxisi����(RRkRnR\RiReRjRIR6tzgetrftdgetrfRR�RVRMR#R%R�R,R+R2R&R3(RCRHR�R�R�R�R�R~tsigntdtabsdtlogdet((s9/usr/lib64/python2.7/site-packages/numpy/linalg/linalg.pyR	4s08


	
.
cCs t|�\}}|t|�S(sy
    Compute the determinant of an array.

    Parameters
    ----------
    a : (M, M) array_like
        Input array.

    Returns
    -------
    det : float
        Determinant of `a`.

    See Also
    --------
    slogdet : Another way to representing the determinant, more suitable
      for large matrices where underflow/overflow may occur.

    Notes
    -----
    The determinant is computed via LU factorization using the LAPACK
    routine z/dgetrf.

    Examples
    --------
    The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:

    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.linalg.det(a)
    -2.0

    (R	R4(RCR�R�((s9/usr/lib64/python2.7/site-packages/numpy/linalg/linalg.pyR
�s!cCs�ddl}t|�\}}t|�\}}t|j�dk}|rd|dd�tf}nt||�|jd}|jd}|jd}	t||�}
||jdkr�td��nt||�\}}t	|�}
t
|�}t|
|	f|�}|j�|d|jd�d|	�f<t
|||�\}}t||�\}}tt||�f|�}tdt|jtt||��d��d�}tdt||�|dt||�ft�}t|�rOtj}d}t|f|�}t|f|�}||||	||||
||d|d||d�}tt|d��}t|f|�}t||f|�}t|
|	f|�}tj|||	||||
||d|d|d�}t|d�}t|f|�}t|f|�}||||	||||
||d||||d�}n�tj}d}t|f|�}||||	||||
||d|d|d�}t|d�}t|f|�}||||	||||
||d|||d�}|d	dkrtd
��ntg|
�}|r�tt|�| d|dt�}|d
|kr�||kr�t|�r�tttt|�|�d�gd|
�}q�ttt|�|d�gd|
�}q�n�tt|�d|�dd�fd|dt�}|d
|kr�||kr�t|�rwttt|�|d�dd�f�ddd�j|
�}q�tt|�|d�dd�fddd�j|
�}n|t||� j�j|
�}||�||�|d
|fS(sa

    Return the least-squares solution to a linear matrix equation.

    Solves the equation `a x = b` by computing a vector `x` that
    minimizes the Euclidean 2-norm `|| b - a x ||^2`.  The equation may
    be under-, well-, or over- determined (i.e., the number of
    linearly independent rows of `a` can be less than, equal to, or
    greater than its number of linearly independent columns).  If `a`
    is square and of full rank, then `x` (but for round-off error) is
    the "exact" solution of the equation.

    Parameters
    ----------
    a : (M, N) array_like
        "Coefficient" matrix.
    b : {(M,), (M, K)} array_like
        Ordinate or "dependent variable" values. If `b` is two-dimensional,
        the least-squares solution is calculated for each of the `K` columns
        of `b`.
    rcond : float, optional
        Cut-off ratio for small singular values of `a`.
        Singular values are set to zero if they are smaller than `rcond`
        times the largest singular value of `a`.

    Returns
    -------
    x : {(M,), (M, K)} ndarray
        Least-squares solution.  The shape of `x` depends on the shape of
        `b`.
    residuals : {(), (1,), (K,)} ndarray
        Sums of residuals; squared Euclidean 2-norm for each column in
        ``b - a*x``.
        If the rank of `a` is < N or > M, this is an empty array.
        If `b` is 1-dimensional, this is a (1,) shape array.
        Otherwise the shape is (K,).
    rank : int
        Rank of matrix `a`.
    s : (min(M, N),) ndarray
        Singular values of `a`.

    Raises
    ------
    LinAlgError
        If computation does not converge.

    Notes
    -----
    If `b` is a matrix, then all array results are returned as matrices.

    Examples
    --------
    Fit a line, ``y = mx + c``, through some noisy data-points:

    >>> x = np.array([0, 1, 2, 3])
    >>> y = np.array([-1, 0.2, 0.9, 2.1])

    By examining the coefficients, we see that the line should have a
    gradient of roughly 1 and cut the y-axis at, more or less, -1.

    We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
    and ``p = [[m], [c]]``.  Now use `lstsq` to solve for `p`:

    >>> A = np.vstack([x, np.ones(len(x))]).T
    >>> A
    array([[ 0.,  1.],
           [ 1.,  1.],
           [ 2.,  1.],
           [ 3.,  1.]])

    >>> m, c = np.linalg.lstsq(A, y)[0]
    >>> print m, c
    1.0 -0.95
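
    As an added check (not part of the original example), the returned
    residuals equal the squared 2-norm of ``y - A p``, reusing ``A`` and
    ``y`` from above:

    >>> p, resids, rank, sv = np.linalg.lstsq(A, y)
    >>> np.allclose(resids, np.sum((y - np.dot(A, p))**2))
    True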

    Plot the data along with the fitted line:

    >>> import matplotlib.pyplot as plt
    >>> plt.plot(x, y, 'o', label='Original data', markersize=10)
    >>> plt.plot(x, m*x + c, 'r', label='Fitted line')
    >>> plt.legend()
    >>> plt.show()

    """


def norm(x, ord=None):
    """
    Matrix or vector norm.

    This function is able to return one of seven different matrix norms,
    or one of an infinite number of vector norms (described below), depending
    on the value of the ``ord`` parameter.

    Parameters
    ----------
    x : {(M,), (M, N)} array_like
        Input array.
    ord : {non-zero int, inf, -inf, 'fro'}, optional
        Order of the norm (see table under ``Notes``). inf means numpy's
        `inf` object.

    Returns
    -------
    n : float
        Norm of the matrix or vector.

    Notes
    -----
    For values of ``ord <= 0``, the result is, strictly speaking, not a
    mathematical 'norm', but it may still be useful for various numerical
    purposes.

    The following norms can be calculated:

    =====  ============================  ==========================
    ord    norm for matrices             norm for vectors
    =====  ============================  ==========================
    None   Frobenius norm                2-norm
    'fro'  Frobenius norm                --
    inf    max(sum(abs(x), axis=1))      max(abs(x))
    -inf   min(sum(abs(x), axis=1))      min(abs(x))
    0      --                            sum(x != 0)
    1      max(sum(abs(x), axis=0))      as below
    -1     min(sum(abs(x), axis=0))      as below
    2      2-norm (largest sing. value)  as below
    -2     smallest singular value       as below
    other  --                            sum(abs(x)**ord)**(1./ord)
    =====  ============================  ==========================

    The Frobenius norm is given by [1]_:

        :math:`||A||_F = [\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
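
    A quick numerical check of this formula (an added illustration, not part
    of the original docstring), assuming NumPy is imported as ``np``:

    >>> import numpy as np
    >>> b = (np.arange(9.) - 4).reshape((3, 3))
    >>> np.allclose(np.linalg.norm(b, 'fro'), np.sqrt(np.sum(np.abs(b)**2)))
    True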

    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
           Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.arange(9) - 4
    >>> a
    array([-4, -3, -2, -1,  0,  1,  2,  3,  4])
    >>> b = a.reshape((3, 3))
    >>> b
    array([[-4, -3, -2],
           [-1,  0,  1],
           [ 2,  3,  4]])

    >>> LA.norm(a)
    7.745966692414834
    >>> LA.norm(b)
    7.745966692414834
    >>> LA.norm(b, 'fro')
    7.745966692414834
    >>> LA.norm(a, np.inf)
    4
    >>> LA.norm(b, np.inf)
    9
    >>> LA.norm(a, -np.inf)
    0
    >>> LA.norm(b, -np.inf)
    2

    >>> LA.norm(a, 1)
    20
    >>> LA.norm(b, 1)
    7
    >>> LA.norm(a, -1)
    -4.6566128774142013e-010
    >>> LA.norm(b, -1)
    6
    >>> LA.norm(a, 2)
    7.745966692414834
    >>> LA.norm(b, 2)
    7.3484692283495345

    >>> LA.norm(a, -2)
    nan
    >>> LA.norm(b, -2)
    1.8570331885190563e-016
    >>> LA.norm(a, 3)
    5.8480354764257312
    >>> LA.norm(a, -3)
    nan

    """