"""Utilities for manipulating the loss collections."""

from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import cond
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import math_ops
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export


def squeeze_or_expand_dimensions(y_pred, y_true=None, sample_weight=None):
  """Squeeze or expand last dimension if needed.

  1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1
  (using `confusion_matrix.remove_squeezable_dimensions`).
  2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1
  from the new rank of `y_pred`.
  If `sample_weight` is scalar, it is kept scalar.

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    y_pred: Predicted values, a `Tensor` of arbitrary dimensions.
    y_true: Optional label `Tensor` whose dimensions match `y_pred`.
    sample_weight: Optional weight scalar or `Tensor` whose dimensions match
      `y_pred`.

  Returns:
    Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has
    the last dimension squeezed; `sample_weight` could also be extended by one
    dimension.
    If `sample_weight` is None, (y_pred, y_true) is returned.
  """
  y_pred_shape = y_pred.shape
  y_pred_rank = y_pred_shape.ndims
  if y_true is not None:
    y_true_shape = y_true.shape
    y_true_rank = y_true_shape.ndims
    if (y_true_rank is not None) and (y_pred_rank is not None):
      # Use static rank. Only squeeze when the ranks already match or the last
      # dimension of `y_pred` is 1 (a rank difference of exactly 1 with a last
      # dimension > 1 typically means sparse labels vs. dense predictions).
      if (y_pred_rank - y_true_rank != 1) or y_pred_shape[-1] == 1:
        y_true, y_pred = confusion_matrix.remove_squeezable_dimensions(
            y_true, y_pred)
    else:
      # Use dynamic rank.
      rank_diff = array_ops.rank(y_pred) - array_ops.rank(y_true)
      squeeze_dims = lambda: confusion_matrix.remove_squeezable_dimensions(
          y_true, y_pred)
      is_last_dim_1 = math_ops.equal(1, array_ops.shape(y_pred)[-1])
      maybe_squeeze_dims = lambda: cond.cond(
          is_last_dim_1, squeeze_dims, lambda: (y_true, y_pred))
      y_true, y_pred = cond.cond(
          math_ops.equal(1, rank_diff), maybe_squeeze_dims, squeeze_dims)

  if sample_weight is None:
    return y_pred, y_true

  weights_shape = sample_weight.shape
  weights_rank = weights_shape.ndims
  if weights_rank == 0:  # If weights is scalar, do nothing.
    return y_pred, y_true, sample_weight

  if (y_pred_rank is not None) and (weights_rank is not None):
    # Use static rank.
    if weights_rank - y_pred_rank == 1:
      sample_weight = array_ops.squeeze(sample_weight, [-1])
    elif y_pred_rank - weights_rank == 1:
      sample_weight = array_ops.expand_dims(sample_weight, [-1])
    return y_pred, y_true, sample_weight

  # Use dynamic rank.
  weights_rank_tensor = array_ops.rank(sample_weight)
  rank_diff = weights_rank_tensor - array_ops.rank(y_pred)
  maybe_squeeze_weights = lambda: array_ops.squeeze(sample_weight, [-1])

  def _maybe_expand_weights():
    expand_weights = lambda: array_ops.expand_dims(sample_weight, [-1])
    return cond.cond(
        math_ops.equal(rank_diff, -1), expand_weights, lambda: sample_weight)

  def _maybe_adjust_weights():
    return cond.cond(
        math_ops.equal(rank_diff, 1), maybe_squeeze_weights,
        _maybe_expand_weights)

  # Squeeze or expand the last dim of `sample_weight` if its rank differs by 1
  # from the new rank of `y_pred`; scalar weights are left untouched.
  sample_weight = cond.cond(
      math_ops.equal(weights_rank_tensor, 0), lambda: sample_weight,
      _maybe_adjust_weights)
  return y_pred, y_true, sample_weight


def scale_losses_by_sample_weight(losses, sample_weight):
  """Scales loss values by the given sample weights.

  `sample_weight` dimensions are updated to match with the dimension of
  `losses` if possible by using squeeze/expand/broadcast.

  Args:
    losses: Loss tensor.
    sample_weight: Sample weights tensor.

  Returns:
    `losses` scaled by `sample_weight` with dtype float32.
  """
  losses = math_ops.cast(losses, dtypes.float32)
  sample_weight = math_ops.cast(sample_weight, dtypes.float32)

  # Update dimensions of `sample_weight` to match those of `losses`.
  losses, _, sample_weight = squeeze_or_expand_dimensions(
      losses, None, sample_weight)
  return math_ops.multiply(losses, sample_weight)


@tf_contextlib.contextmanager
def check_per_example_loss_rank(per_example_loss):
  """Context manager that checks that the rank of per_example_loss is at least 1.

  Args:
    per_example_loss: Per example loss tensor.

  Yields:
    A context manager.
  """
  loss_rank = per_example_loss.shape.rank
  if loss_rank is not None:
    # Handle static rank.
    if loss_rank == 0:
      raise ValueError(
          "Invalid value passed for `per_example_loss`. Expected a tensor "
          f"with at least rank 1. Received per_example_loss={per_example_loss} "
          f"with rank {loss_rank}")
    yield
  else:
    # Handle dynamic rank.
    with ops.control_dependencies([
        check_ops.assert_greater_equal(
            array_ops.rank(per_example_loss),
            math_ops.cast(1, dtype=dtypes.int32),
            message="Invalid value passed for `per_example_loss`. Expected a "
            "tensor with at least rank 1.")
    ]):
      yield


@tf_export(v1=["losses.add_loss"])
def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES):
  """Adds an externally defined loss to the collection of losses.

  Args:
    loss: A loss `Tensor`.
    loss_collection: Optional collection to add the loss to.
  """
  # When executing eagerly there is no meaningful iteration boundary at which
  # a collection could be cleared, so losses are not tracked in that case.
  if loss_collection and not context.executing_eagerly():
    ops.add_to_collection(loss_collection, loss)


@tf_export(v1=["losses.get_losses"])
def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES):
  """Gets the list of losses from the loss_collection.

  Args:
    scope: An optional scope name for filtering the losses to return.
    loss_collection: Optional losses collection.

  Returns:
    A list of loss tensors.
  """
  return ops.get_collection(loss_collection, scope)


@tf_export(v1=["losses.get_regularization_losses"])
def get_regularization_losses(scope=None):
  """Gets the list of regularization losses.

  Args:
    scope: An optional scope name for filtering the losses to return.

  Returns:
    A list of regularization losses as Tensors.
  """
  return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope)


@tf_export(v1=["losses.get_regularization_loss"])
def get_regularization_loss(scope=None, name="total_regularization_loss"):
  """Gets the total regularization loss.

  Args:
    scope: An optional scope name for filtering the losses to return.
    name: The name of the returned tensor.

  Returns:
    A scalar regularization loss.
  """
  losses = get_regularization_losses(scope)
  if losses:
    return math_ops.add_n(losses, name=name)
  else:
    return constant_op.constant(0.0)


@tf_export(v1=["losses.get_total_loss"])
def get_total_loss(add_regularization_losses=True,
                   name="total_loss",
                   scope=None):
  """Returns a tensor whose value represents the total loss.

  In particular, this adds any losses you have added with `tf.add_loss()` to
  any regularization losses that have been added by regularization parameters
  on layers constructors e.g. `tf.layers`. Be very sure to use this if you
  are constructing a loss_op manually. Otherwise regularization arguments
  on `tf.layers` methods will not function.

  Args:
    add_regularization_losses: A boolean indicating whether or not to use the
      regularization losses in the sum.
    name: The name of the returned tensor.
    scope: An optional scope name for filtering the losses to return. Note that
      this filters the losses added with `tf.add_loss()` as well as the
      regularization losses to that scope.

  Returns:
    A `Tensor` whose value represents the total loss.

  Raises:
    ValueError: if `losses` is not iterable.
  """
  losses = get_losses(scope=scope)
  if add_regularization_losses:
    losses += get_regularization_losses(scope=scope)
  return math_ops.add_n(losses, name=name)
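

# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for documentation; not part of the original
# TensorFlow module). It shows how `squeeze_or_expand_dimensions` and the
# collection helpers above are typically exercised from TF1-style graph code.
# The tensor shapes and values below are hypothetical examples, and the demo
# assumes `tensorflow.compat.v1` is importable; it only runs when this file is
# executed directly, never on import.
if __name__ == "__main__":
  import tensorflow.compat.v1 as tf

  tf.disable_eager_execution()

  labels = tf.constant([[1.0], [2.0], [3.0]])   # shape (3, 1)
  predictions = tf.constant([1.5, 2.5, 3.5])    # shape (3,)
  weights = tf.constant([1.0, 0.5, 2.0])        # shape (3,)

  # `labels` has one trailing dimension more than `predictions`, so its last
  # dimension is squeezed; `weights` already matches the rank of `predictions`
  # and is returned unchanged.
  predictions, labels, weights = squeeze_or_expand_dimensions(
      predictions, labels, weights)

  # Register a weighted loss in the default `ops.GraphKeys.LOSSES` collection,
  # then sum it together with any regularization losses via `get_total_loss`.
  tf.losses.mean_squared_error(labels, predictions, weights=weights)
  total = get_total_loss()

  with tf.Session() as sess:
    print("total loss:", sess.run(total))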