#!/usr/bin/env python
# coding: utf-8

# # Testing Derivatives and Automatic Differentiation
# 
# Copyright (C) 2026 Andreas Kloeckner
# 
# <details>
# <summary>MIT License</summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# 
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# 
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# </details>

# In[6]:


import numpy as np
import numpy.linalg as la


# In[60]:


def f(xvec):
    """Evaluate the 2D nonlinear test system F(x, y) at the point *xvec*.

    *xvec* is a length-2 array-like ``(x, y)``; returns a length-2 ndarray.
    """
    x, y = xvec
    first = x*y + 2*y**3 - 2
    second = x**2*y + 4*y**2*np.cos(x) - 4
    return np.array([first, second])


# In[64]:


def Jf(xvec):
    """Analytic Jacobian of the test function f at *xvec*, as a 2x2 ndarray."""
    x, y = xvec
    # Partial derivatives of each component, spelled out row by row.
    df1_dx = y
    df1_dy = x + 6*y**2
    df2_dx = 2*x*y - 4*y**2*np.sin(x)
    df2_dy = x**2 + 8*y*np.cos(x)
    return np.array([
        [df1_dx, df1_dy],
        [df2_dx, df2_dy],
        ])


# In[65]:


# Pick a random evaluation point and a random unit direction for the
# directional-derivative tests below.
x = np.random.randn(2)

s = np.random.randn(2)
s = s / la.norm(s, 2)


# In[67]:


# Compare the forward-difference directional derivative against the
# analytic Jacobian applied to s; the error should shrink with h
# (first-order accuracy) until round-off takes over.
for h in [1e-1, 1e-2, 1e-3, 1e-4, 1e-5]:
    fd_deriv = (f(x + h*s) - f(x))/h
    print(h, fd_deriv - Jf(x)@s)


# Now try centered differences.

# In[68]:


# Same comparison with centered differences, which are second-order
# accurate, so the error should decrease faster in h.
for h in [1e-1, 1e-2, 1e-3, 1e-4, 1e-5]:
    cd_deriv = (f(x + h*s) - f(x - h*s))/(2*h)
    print(h, cd_deriv - Jf(x)@s)


# ## Automatic differentiation (with JAX)

# In[25]:


import jax.numpy as jnp
from jax import jacfwd, jacrev, make_jaxpr, jvp


# In[23]:


def f(xvec):
    """JAX version of the 2D nonlinear test system F(x, y).

    *xvec* is a length-2 array ``(x, y)``; returns a length-2 jnp array.
    """
    x, y = xvec
    component1 = x*y + 2*y**3 - 2
    component2 = x**2*y + 4*y**2*jnp.cos(x) - 4
    return jnp.array([component1, component2])


# In[43]:


# Fresh random evaluation point and unit direction for the JAX tests.
x = np.random.randn(2)

s = np.random.randn(2)
s = s / la.norm(s, 2)


# Now subject the JAX-computed Jacobian to the same test as above:

# In[42]:


# Build the Jacobian via forward-mode AD and subject it to the same
# first-order finite-difference check as the hand-written Jacobian above.
Jf = jacfwd(f)

for h in [1e-1, 1e-2, 1e-3, 1e-4]:
    fd_deriv = (f(x + h*s) - f(x))/h
    print(h, fd_deriv - Jf(x)@s)


# Is there a computationally more efficient variant? Consider using
# ```
# _, jvp_val = jvp(f, (x,), (s,))
# ```

# In[50]:


# The Jacobian-vector product does not depend on h, so compute it once
# instead of re-deriving it on every loop iteration (the original call
# inside the loop was loop-invariant and wasted a JAX trace per pass).
_, jvp_val = jvp(f, (x,), (s,))

for h in [1e-1, 1e-2, 1e-3, 1e-4]:
    print(h, (f(x + h*s) - f(x))/h - jvp_val)


# ### How does it work?

# In[51]:


# Show the jaxpr (JAX's traced intermediate representation) for f itself.
jaxpr_f = make_jaxpr(f)(jnp.array([1.0, 2.0]))
print(jaxpr_f)


# In[52]:


# The jaxpr of the forward-mode Jacobian of f.
jaxpr_fwd = make_jaxpr(jacfwd(f))(jnp.array([1.0, 2.0]))
print(jaxpr_fwd)


# In[53]:


# The jaxpr of the reverse-mode Jacobian of f, for comparison with jacfwd.
jaxpr_rev = make_jaxpr(jacrev(f))(jnp.array([1.0, 2.0]))
print(jaxpr_rev)


# - Comment on `jacfwd` vs `jacrev`.
# - Comment on `jvp` vs `vjp`.
# - Mention `jit`.
# - Mention `vmap`.

# In[ ]:




