"""Layers that act as activation functions."""

from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import math_ops


def get_globals():
  return globals()


class LeakyReLU(Layer):
  """Leaky version of a Rectified Linear Unit.

  It allows a small gradient when the unit is not active:

  ```
    f(x) = alpha * x if x < 0
    f(x) = x if x >= 0
  ```

  Usage:

  >>> layer = tf.keras.layers.LeakyReLU()
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [-0.9, -0.3, 0.0, 2.0]
  >>> layer = tf.keras.layers.LeakyReLU(alpha=0.1)
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [-0.3, -0.1, 0.0, 2.0]

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the batch axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    alpha: Float >= 0. Negative slope coefficient. Defaults to 0.3.

  """

  def __init__(self, alpha=0.3, **kwargs):
    super(LeakyReLU, self).__init__(**kwargs)
    if alpha is None:
      raise ValueError('The alpha value of a Leaky ReLU layer '
                       'cannot be None, needs a float. Got %s' % alpha)
    self.supports_masking = True
    self.alpha = backend.cast_to_floatx(alpha)

  def call(self, inputs):
    return backend.relu(inputs, alpha=self.alpha)

  def get_config(self):
    config = {'alpha': float(self.alpha)}
    base_config = super(LeakyReLU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape


class PReLU(Layer):
  """Parametric Rectified Linear Unit.

  It follows:

  ```
    f(x) = alpha * x for x < 0
    f(x) = x for x >= 0
  ```

  where `alpha` is a learned array with the same shape as x.
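
  Usage (a minimal sketch; with the default `alpha_initializer='zeros'` the
  freshly built layer behaves like a plain ReLU, so the values below reflect
  untrained weights):

  >>> layer = tf.keras.layers.PReLU()
  >>> output = layer(np.asarray([[-3.0, -1.0, 0.0, 2.0]]))
  >>> list(output.numpy()[0])
  [0.0, 0.0, 0.0, 2.0]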

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    alpha_initializer: Initializer function for the weights.
    alpha_regularizer: Regularizer for the weights.
    alpha_constraint: Constraint for the weights.
    shared_axes: The axes along which to share learnable
      parameters for the activation function.
      For example, if the incoming feature maps
      are from a 2D convolution
      with output shape `(batch, height, width, channels)`,
      and you wish to share parameters across space
      so that each filter only has one set of parameters,
      set `shared_axes=[1, 2]`, as shown in the sketch below.
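
  A quick way to check the effect of `shared_axes` (an illustrative sketch;
  the input shape below is arbitrary):

  >>> layer = tf.keras.layers.PReLU(shared_axes=[1, 2])
  >>> _ = layer(tf.zeros((1, 8, 8, 16)))
  >>> layer.alpha.shape
  TensorShape([1, 1, 16])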
  """

  def __init__(self,
               alpha_initializer='zeros',
               alpha_regularizer=None,
               alpha_constraint=None,
               shared_axes=None,
               **kwargs):
    super(PReLU, self).__init__(**kwargs)
    self.supports_masking = True
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.alpha_regularizer = regularizers.get(alpha_regularizer)
    self.alpha_constraint = constraints.get(alpha_constraint)
    if shared_axes is None:
      self.shared_axes = None
    elif not isinstance(shared_axes, (list, tuple)):
      self.shared_axes = [shared_axes]
    else:
      self.shared_axes = list(shared_axes)

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    param_shape = list(input_shape[1:])
    if self.shared_axes is not None:
      # Broadcast `alpha` along every shared axis.
      for i in self.shared_axes:
        param_shape[i - 1] = 1
    self.alpha = self.add_weight(
        shape=param_shape,
        name='alpha',
        initializer=self.alpha_initializer,
        regularizer=self.alpha_regularizer,
        constraint=self.alpha_constraint)
    # Set input spec: every non-shared axis must keep the shape seen here.
    axes = {}
    if self.shared_axes:
      for i in range(1, len(input_shape)):
        if i not in self.shared_axes:
          axes[i] = input_shape[i]
    self.input_spec = InputSpec(ndim=len(input_shape), axes=axes)
    self.built = True

  def call(self, inputs):
    pos = backend.relu(inputs)
    neg = -self.alpha * backend.relu(-inputs)
    return pos + neg

  def get_config(self):
    config = {
        'alpha_initializer': initializers.serialize(self.alpha_initializer),
        'alpha_regularizer': regularizers.serialize(self.alpha_regularizer),
        'alpha_constraint': constraints.serialize(self.alpha_constraint),
        'shared_axes': self.shared_axes
    }
    base_config = super(PReLU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape


class ELU(Layer):
  """Exponential Linear Unit.

  It follows:

  ```
    f(x) = alpha * (exp(x) - 1.) for x < 0
    f(x) = x for x >= 0
  ```
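
  Usage (a minimal sketch; the negative outputs are rounded here because the
  exact float32 digits of `alpha * (exp(x) - 1.)` are not informative):

  >>> layer = tf.keras.layers.ELU()
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> [round(float(v), 2) for v in output.numpy()]
  [-0.95, -0.63, 0.0, 2.0]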

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    alpha: Scale for the negative factor.
  """

  def __init__(self, alpha=1.0, **kwargs):
    super(ELU, self).__init__(**kwargs)
    if alpha is None:
      raise ValueError('Alpha of an ELU layer cannot be None, '
                       'requires a float. Got %s' % alpha)
    self.supports_masking = True
    self.alpha = backend.cast_to_floatx(alpha)

  def call(self, inputs):
    return backend.elu(inputs, self.alpha)

  def get_config(self):
    config = {'alpha': float(self.alpha)}
    base_config = super(ELU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape


class ThresholdedReLU(Layer):
  """Thresholded Rectified Linear Unit.

  It follows:

  ```
    f(x) = x for x > theta
    f(x) = 0 otherwise
  ```
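
  Usage (a minimal sketch with `theta=1.5`; note that the threshold value
  itself is mapped to zero):

  >>> layer = tf.keras.layers.ThresholdedReLU(theta=1.5)
  >>> output = layer([0.5, 1.0, 1.5, 2.0])
  >>> list(output.numpy())
  [0.0, 0.0, 0.0, 2.0]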

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    theta: Float >= 0. Threshold location of activation.
  """

  def __init__(self, theta=1.0, **kwargs):
    super(ThresholdedReLU, self).__init__(**kwargs)
    if theta is None:
      raise ValueError('Theta of a Thresholded ReLU layer cannot be None, '
                       'requires a float. Got %s' % theta)
    if theta < 0:
      raise ValueError('The theta value of a Thresholded ReLU layer '
                       'should be >=0, got %s' % theta)
    self.supports_masking = True
    self.theta = backend.cast_to_floatx(theta)

  def call(self, inputs):
    theta = math_ops.cast(self.theta, inputs.dtype)
    # Zero out every value that is not strictly greater than `theta`.
    return inputs * math_ops.cast(math_ops.greater(inputs, theta), inputs.dtype)

  def get_config(self):
    config = {'theta': float(self.theta)}
    base_config = super(ThresholdedReLU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape


def _large_compatible_negative(tensor_type):
  """Large negative number as Tensor.

  This function is necessary because the standard value for epsilon
  in this module (-1e9) cannot be represented using tf.float16.
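
  A quick illustration (an illustrative check, not part of the API; it assumes
  IEEE half-precision semantics):

  >>> float(tf.float16.min)
  -65504.0
  >>> float(tf.cast(-1e9, tf.float16))
  -inf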

  Args:
    tensor_type: a dtype to determine the type.

  Returns:
    a large negative number.
  """
  if tensor_type == dtypes.float16:
    return dtypes.float16.min
  return -1e9


class Softmax(Layer):
  """Softmax activation function.

  Example without mask:

  >>> inp = np.asarray([1., 2., 1.])
  >>> layer = tf.keras.layers.Softmax()
  >>> layer(inp).numpy()
  array([0.21194157, 0.5761169 , 0.21194157], dtype=float32)
  >>> mask = np.asarray([True, False, True], dtype=bool)
  >>> layer(inp, mask).numpy()
  array([0.5, 0. , 0.5], dtype=float32)
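
  Example with the `axis` argument (a minimal sketch; equal logits along the
  normalization axis give exactly uniform probabilities):

  >>> layer = tf.keras.layers.Softmax(axis=-1)
  >>> layer(np.asarray([[1., 1.], [2., 2.]])).numpy()
  array([[0.5, 0.5],
         [0.5, 0.5]], dtype=float32)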

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the samples axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    axis: Integer, or list of Integers, axis along which the softmax
      normalization is applied.
  Call arguments:
    inputs: The inputs, or logits to the softmax layer.
    mask: A boolean mask of the same shape as `inputs`. Defaults to `None`. The
      mask specifies 1 to keep and 0 to mask.

  Returns:
    softmaxed output with the same shape as `inputs`.
  """

  def __init__(self, axis=-1, **kwargs):
    super(Softmax, self).__init__(**kwargs)
    self.supports_masking = True
    self.axis = axis

  def call(self, inputs, mask=None):
    if mask is not None:
      # Since mask is 1.0 for positions to keep and 0.0 for masked positions,
      # this creates a tensor that is 0.0 where we attend and a very large
      # negative number where we mask. Adding it to the logits before the
      # softmax effectively removes the masked positions.
      adder = (1.0 - math_ops.cast(mask, inputs.dtype)) * (
          _large_compatible_negative(inputs.dtype))
      inputs += adder
    if isinstance(self.axis, (tuple, list)):
      if len(self.axis) > 1:
        return math_ops.exp(inputs - math_ops.reduce_logsumexp(
            inputs, axis=self.axis, keepdims=True))
      else:
        return backend.softmax(inputs, axis=self.axis[0])
    return backend.softmax(inputs, axis=self.axis)

  def get_config(self):
    config = {'axis': self.axis}
    base_config = super(Softmax, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape


class ReLU(Layer):
  """Rectified Linear Unit activation function.

  With default values, it returns element-wise `max(x, 0)`.

  Otherwise, it follows:

  ```
    f(x) = max_value if x >= max_value
    f(x) = x if threshold <= x < max_value
    f(x) = negative_slope * (x - threshold) otherwise
  ```

  Usage:

  >>> layer = tf.keras.layers.ReLU()
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [0.0, 0.0, 0.0, 2.0]
  >>> layer = tf.keras.layers.ReLU(max_value=1.0)
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [0.0, 0.0, 0.0, 1.0]
  >>> layer = tf.keras.layers.ReLU(negative_slope=1.0)
  >>> output = layer([-3.0, -1.0, 0.0, 2.0])
  >>> list(output.numpy())
  [-3.0, -1.0, 0.0, 2.0]
  >>> layer = tf.keras.layers.ReLU(threshold=1.5)
  >>> output = layer([-3.0, -1.0, 1.0, 2.0])
  >>> list(output.numpy())
  [0.0, 0.0, 0.0, 2.0]

  Input shape:
    Arbitrary. Use the keyword argument `input_shape`
    (tuple of integers, does not include the batch axis)
    when using this layer as the first layer in a model.

  Output shape:
    Same shape as the input.

  Args:
    max_value: Float >= 0. Maximum activation value. Defaults to None, which
      means unlimited.
    negative_slope: Float >= 0. Negative slope coefficient. Defaults to 0.
    threshold: Float >= 0. Threshold value for thresholded activation.
      Defaults to 0.
  """

  def __init__(self, max_value=None, negative_slope=0, threshold=0, **kwargs):
    super(ReLU, self).__init__(**kwargs)
    if max_value is not None and max_value < 0.:
      raise ValueError('max_value of a ReLU layer cannot be a negative '
                       'value. Got: %s' % max_value)
    if negative_slope is None or negative_slope < 0.:
      raise ValueError('negative_slope of a ReLU layer cannot be a negative '
                       'value. Got: %s' % negative_slope)
    if threshold is None or threshold < 0.:
      raise ValueError('threshold of a ReLU layer cannot be a negative '
                       'value. Got: %s' % threshold)

    self.supports_masking = True
    if max_value is not None:
      max_value = backend.cast_to_floatx(max_value)
    self.max_value = max_value
    self.negative_slope = backend.cast_to_floatx(negative_slope)
    self.threshold = backend.cast_to_floatx(threshold)

  def call(self, inputs):
    # `alpha` is the backend name for the leaky-relu slope (negative_slope).
    return backend.relu(
        inputs,
        alpha=self.negative_slope,
        max_value=self.max_value,
        threshold=self.threshold)

  def get_config(self):
    config = {
        'max_value': self.max_value,
        'negative_slope': self.negative_slope,
        'threshold': self.threshold
    }
    base_config = super(ReLU, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    return input_shape