"""Accuracy metrics."""

import tensorflow.compat.v2 as tf

from keras.src import backend
from keras.src.dtensor import utils as dtensor_utils
from keras.src.metrics import base_metric
from keras.src.utils import metrics_utils
from tensorflow.python.util.tf_export import keras_export


@keras_export("keras.metrics.Accuracy")
class Accuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions equal labels.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `accuracy`: an idempotent
    operation that simply divides `total` by `count`.
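
    For example, the metric keeps a running average over every call to
    `update_state` (a minimal sketch, not a doctest; float formatting may
    differ):

    ```python
    import tensorflow as tf

    m = tf.keras.metrics.Accuracy()
    m.update_state([[1], [2]], [[1], [2]])  # 2 matches -> total=2, count=2
    m.update_state([[3]], [[0]])  # 1 mismatch -> total=2, count=3
    m.result()  # total / count = 2/3, i.e. ~0.667
    ```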

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.Accuracy()
    >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
    >>> m.result().numpy()
    0.75

    >>> m.reset_state()
    >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]],
    ...                sample_weight=[1, 1, 0, 0])
    >>> m.result().numpy()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.Accuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="accuracy", dtype=None):
        super().__init__(accuracy, name, dtype=dtype)


@keras_export("keras.metrics.BinaryAccuracy")
class BinaryAccuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions match binary labels.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `binary accuracy`: an idempotent
    operation that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      threshold: (Optional) Float representing the threshold for deciding
        whether prediction values are 1 or 0.
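
    For instance, raising the threshold changes which probabilities count as
    a positive prediction (an illustrative sketch):

    ```python
    import tensorflow as tf

    m = tf.keras.metrics.BinaryAccuracy(threshold=0.7)
    # 0.6 now binarizes to 0, so all four predictions match their labels.
    m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
    m.result()  # 1.0
    ```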

    Standalone usage:

    >>> m = tf.keras.metrics.BinaryAccuracy()
    >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
    >>> m.result().numpy()
    0.75

    >>> m.reset_state()
    >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]],
    ...                sample_weight=[1, 0, 0, 1])
    >>> m.result().numpy()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.BinaryAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="binary_accuracy", dtype=None, threshold=0.5):
        super().__init__(
            metrics_utils.binary_matches,
            name,
            dtype=dtype,
            threshold=threshold,
        )


@keras_export("keras.metrics.CategoricalAccuracy")
class CategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions match one-hot labels.

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and probabilities is the same.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `categorical accuracy`: an idempotent
    operation that simply divides `total` by `count`.

    `y_pred` and `y_true` should be passed in as vectors of probabilities,
    rather than as labels. If necessary, use `tf.one_hot` to expand `y_true`
    into a vector.
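
    For example, integer labels can be expanded with `tf.one_hot` before
    being passed in (an illustrative sketch):

    ```python
    import tensorflow as tf

    labels = tf.constant([2, 1])
    y_true = tf.one_hot(labels, depth=3)  # [[0., 0., 1.], [0., 1., 0.]]
    y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0.0]]
    m = tf.keras.metrics.CategoricalAccuracy()
    m.update_state(y_true, y_pred)
    m.result()  # 0.5: only the second sample's argmax matches
    ```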

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.CategoricalAccuracy()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
    ...                 [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
    ...                 [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.CategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="categorical_accuracy", dtype=None):
        super().__init__(
            lambda y_true, y_pred: metrics_utils.sparse_categorical_matches(
                tf.math.argmax(y_true, axis=-1), y_pred
            ),
            name,
            dtype=dtype,
        )


@keras_export("keras.metrics.SparseCategoricalAccuracy")
class SparseCategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions match integer labels.

    ```python
    acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1)))
    ```
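
    As a concrete NumPy check of the formula above (illustrative only; it
    mirrors the weighted standalone usage further down, where the weights
    already sum to 1):

    ```python
    import numpy as np

    y_true = np.array([2, 1])
    y_pred = np.array([[0.1, 0.6, 0.3], [0.05, 0.95, 0.0]])
    sample_weight = np.array([0.7, 0.3])
    # argmax(y_pred, axis=1) == [1, 1]; only the second label matches.
    np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1)))  # 0.3
    ```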

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and probabilities is the same.

    This metric creates two local variables, `total` and `count` that are used
    to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `sparse categorical accuracy`: an
    idempotent operation that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.SparseCategoricalAccuracy()
    >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="sparse_categorical_accuracy", dtype=None):
        super().__init__(
            metrics_utils.sparse_categorical_matches, name, dtype=dtype
        )


_SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING = """Accumulates metric statistics.

For sparse categorical metrics, the shapes of `y_true` and `y_pred` are
different.

Args:
  y_true: Ground truth label values. shape = `[batch_size, d0, .. dN-1]` or
    shape = `[batch_size, d0, .. dN-1, 1]`.
  y_pred: The predicted probability values. shape = `[batch_size, d0, .. dN]`.
  sample_weight: Optional `sample_weight` acts as a
    coefficient for the metric. If a scalar is provided, then the metric is
    simply scaled by the given value. If `sample_weight` is a tensor of size
    `[batch_size]`, then the metric for each sample of the batch is rescaled
    by the corresponding element in the `sample_weight` vector. If the shape
    of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted
    to this shape), then each metric element of `y_pred` is scaled by the
    corresponding value of `sample_weight`. (Note on `dN-1`: all metric
    functions reduce by 1 dimension, usually the last axis (-1)).
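
For example, a `[batch_size]` weight vector rescales each sample's
contribution (an illustrative sketch):

```python
import tensorflow as tf

m = tf.keras.metrics.SparseCategoricalAccuracy()
# Only the second prediction is correct; it carries weight 0.3, so
# result() is (0.7 * 0 + 0.3 * 1) / (0.7 + 0.3) = 0.3.
m.update_state([2, 1], [[0.1, 0.6, 0.3], [0.05, 0.95, 0.0]],
               sample_weight=[0.7, 0.3])
m.result()  # 0.3
```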

Returns:
  Update op.
"""

SparseCategoricalAccuracy.update_state.__doc__ = (
    _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING
)


@keras_export("keras.metrics.TopKCategoricalAccuracy")
class TopKCategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Computes how often targets are in the top `K` predictions.

    Args:
      k: (Optional) Number of top elements to look at for computing accuracy.
        Defaults to `5`.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
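
    For example, widening `k` turns near misses into hits (an illustrative
    sketch):

    ```python
    import tensorflow as tf

    y_true = [[0, 0, 1], [0, 1, 0]]
    y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0.0]]
    m = tf.keras.metrics.TopKCategoricalAccuracy(k=2)
    m.update_state(y_true, y_pred)
    m.result()  # 1.0: both true classes fall inside the top 2 predictions
    ```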

    Standalone usage:

    >>> m = tf.keras.metrics.TopKCategoricalAccuracy(k=1)
    >>> m.update_state([[0, 0, 1], [0, 1, 0]],
    ...                [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]],
    ...                [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.TopKCategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, k=5, name="top_k_categorical_accuracy", dtype=None):
        super().__init__(
            lambda yt, yp, k: metrics_utils.sparse_top_k_categorical_matches(
                tf.math.argmax(yt, axis=-1), yp, k
            ),
            name,
            dtype=dtype,
            k=k,
        )


@keras_export("keras.metrics.SparseTopKCategoricalAccuracy")
class SparseTopKCategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Computes how often integer targets are in the top `K` predictions.

    Args:
      k: (Optional) Number of top elements to look at for computing accuracy.
        Defaults to `5`.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
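
    For example, the same `k=2` relaxation applies to integer targets (an
    illustrative sketch):

    ```python
    import tensorflow as tf

    m = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=2)
    m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0.0]])
    m.result()  # 1.0: labels 2 and 1 are both in their sample's top 2
    ```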

    Standalone usage:

    >>> m = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)
    >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(
      optimizer='sgd',
      loss='mse',
      metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(
        self, k=5, name="sparse_top_k_categorical_accuracy", dtype=None
    ):
        super().__init__(
            metrics_utils.sparse_top_k_categorical_matches,
            name,
            dtype=dtype,
            k=k,
        )


SparseTopKCategoricalAccuracy.update_state.__doc__ = (
    _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING
)


def accuracy(y_true, y_pred):
    # Flatten ragged inputs to dense tensors before comparing them.
    [
        y_pred,
        y_true,
    ], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values(
        [y_pred, y_true]
    )
    y_true.shape.assert_is_compatible_with(y_pred.shape)
    if y_true.dtype != y_pred.dtype:
        y_pred = tf.cast(y_pred, y_true.dtype)
    # 1.0 where prediction equals label, 0.0 elsewhere.
    return tf.cast(tf.equal(y_true, y_pred), backend.floatx())


@keras_export("keras.metrics.binary_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def binary_accuracy(y_true, y_pred, threshold=0.5):
    """Calculates how often predictions match binary labels.

    Standalone usage:
    >>> y_true = [[1], [1], [0], [0]]
    >>> y_pred = [[1], [1], [0], [0]]
    >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)
    >>> assert m.shape == (4,)
    >>> m.numpy()
    array([1., 1., 1., 1.], dtype=float32)

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
      threshold: (Optional) Float representing the threshold for deciding
        whether prediction values are 1 or 0.
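
    For example, the mean over the last axis yields one accuracy per sample
    (an illustrative sketch):

    ```python
    import tensorflow as tf

    # Two samples, two binary targets each; the last axis is averaged away.
    y_true = [[1, 0], [0, 0]]
    y_pred = [[0.9, 0.1], [0.4, 0.6]]
    tf.keras.metrics.binary_accuracy(y_true, y_pred)  # [1.0, 0.5]
    ```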

    Returns:
      Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`
    """
    return tf.reduce_mean(
        metrics_utils.binary_matches(y_true, y_pred, threshold), axis=-1
    )


@keras_export("keras.metrics.categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def categorical_accuracy(y_true, y_pred):
    """Calculates how often predictions match one-hot labels.

    Standalone usage:
    >>> y_true = [[0, 0, 1], [0, 1, 0]]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.categorical_accuracy(y_true, y_pred)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([0., 1.], dtype=float32)

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and probabilities is the same.
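
    For example, unnormalized logits give the same result as probabilities
    (an illustrative sketch):

    ```python
    import tensorflow as tf

    y_true = [[0, 0, 1], [0, 1, 0]]
    logits = [[-1.0, 2.0, 4.0], [0.5, 3.0, -2.0]]
    tf.keras.metrics.categorical_accuracy(y_true, logits)  # [1., 1.]
    ```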

    Args:
      y_true: One-hot ground truth values.
      y_pred: The prediction values.

    Returns:
      Categorical accuracy values.
    """
    return metrics_utils.sparse_categorical_matches(
        tf.math.argmax(y_true, axis=-1), y_pred
    )


@keras_export("keras.metrics.sparse_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def sparse_categorical_accuracy(y_true, y_pred):
    """Calculates how often predictions match integer labels.

    Standalone usage:
    >>> y_true = [2, 1]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([0., 1.], dtype=float32)

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and probabilities is the same.
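
    Labels with a trailing length-1 dimension are also accepted; the result
    is squeezed back to shape `(batch_size,)` (an illustrative sketch):

    ```python
    import tensorflow as tf

    tf.keras.metrics.sparse_categorical_accuracy(
        [[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0.0]]
    )  # [0., 1.]
    ```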

    Args:
      y_true: Integer ground truth values.
      y_pred: The prediction values.

    Returns:
      Sparse categorical accuracy values.
    """
    matches = metrics_utils.sparse_categorical_matches(y_true, y_pred)
    # If labels arrived with a trailing length-1 dimension, drop it so the
    # returned matches have shape `[batch_size]`.
    if matches.shape.ndims > 1 and matches.shape[-1] == 1:
        matches = tf.squeeze(matches, [-1])
    return matches


@keras_export("keras.metrics.top_k_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def top_k_categorical_accuracy(y_true, y_pred, k=5):
    """Computes how often targets are in the top `K` predictions.

    Standalone usage:
    >>> y_true = [[0, 0, 1], [0, 1, 0]]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([1., 1.], dtype=float32)

    Args:
      y_true: The ground truth values.
      y_pred: The prediction values.
      k: (Optional) Number of top elements to look at for computing accuracy.
        Defaults to `5`.

    Returns:
      Top K categorical accuracy value.
    """
    return metrics_utils.sparse_top_k_categorical_matches(
        tf.math.argmax(y_true, axis=-1), y_pred, k
    )


@keras_export("keras.metrics.sparse_top_k_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
    """Computes how often integer targets are in the top `K` predictions.

    Standalone usage:
    >>> y_true = [2, 1]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.sparse_top_k_categorical_accuracy(
    ...     y_true, y_pred, k=3)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([1., 1.], dtype=float32)

    Args:
      y_true: tensor of true targets.
      y_pred: tensor of predicted targets.
      k: (Optional) Number of top elements to look at for computing accuracy.
        Defaults to `5`.

    Returns:
      Sparse top K categorical accuracy value.
    """
    return metrics_utils.sparse_top_k_categorical_matches(y_true, y_pred, k)