4D SU(3) Model

December 6, 2023

import os
from pathlib import Path
from typing import Optional

import lovely_tensors as lt
import matplotlib.pyplot as plt
import numpy as np
import torch
import yaml

import l2hmc.group.su3.pytorch.group as g
from l2hmc import get_logger
from l2hmc.common import grab_tensor, print_dict
from l2hmc.configs import dict_to_list_of_overrides, get_experiment
from l2hmc.experiment.pytorch.experiment import Experiment, evaluate  # noqa  # noqa
from l2hmc.utils.dist import setup_torch
from l2hmc.utils.plot_helpers import set_plot_style

lt.monkey_patch()

# Pin the run to one GPU and a fixed rendezvous port for DDP setup.
os.environ['CUDA_VISIBLE_DEVICES'] = '6'
# Fixed: was 'truecolor;' — the stray ';' breaks truecolor detection in
# terminal libraries that test COLORTERM in ('truecolor', '24bit').
os.environ['COLORTERM'] = 'truecolor'
os.environ['MASTER_PORT'] = '5433'
# os.environ['MPLBACKEND'] = 'module://matplotlib-backend-kitty'
# plt.switch_backend('module://matplotlib-backend-kitty')
from enrich.style import STYLES
from rich.theme import Theme
from enrich.console import Console

log = get_logger(__name__)
theme = Theme(STYLES)
# log = get_logger('ClimRR')
console = Console(theme=theme, log_path=False, markup=True)
if console.is_jupyter:
    # Force non-Jupyter rendering so output is plain-text friendly.
    console.is_jupyter = False

# Returns the distributed rank; unused here, hence the throwaway binding.
_ = setup_torch(precision='float64', backend='DDP', seed=4351)

set_plot_style()

from l2hmc.utils.plot_helpers import (  # noqa
    set_plot_style,
    plot_scalar,
    plot_chains,
    plot_leapfrogs
)
Using device: cpu
Failed to download font: Source Sans Pro, skipping!
Failed to download font: Titillium WebRoboto Condensed, skipping!
[10/04/23 08:06:53][INFO][dist.py:226] - Caught MASTER_PORT:5433 from environment!
[10/04/23 08:06:53][WARNING][dist.py:332] - Setting default dtype: float64
[10/04/23 08:06:53][INFO][dist.py:338] - Global Rank: 0 / 0
def savefig(fig: plt.Figure, fname: str, outdir: os.PathLike):
    """Save `fig` twice under `outdir`: an SVG (vector) and a 300-dpi PNG.

    Files land at ``<outdir>/svgs/<fname>.svg`` and
    ``<outdir>/pngs/<fname>.png``; both subdirectories are created on demand.
    """
    root = Path(outdir)
    svgfile = root / 'svgs' / f'{fname}.svg'
    pngfile = root / 'pngs' / f'{fname}.png'
    for target in (svgfile, pngfile):
        target.parent.mkdir(exist_ok=True, parents=True)
    fig.savefig(svgfile, transparent=True, bbox_inches='tight')
    fig.savefig(pngfile, transparent=True, bbox_inches='tight', dpi=300)


def plot_metrics(metrics: dict, title: Optional[str] = None, **kwargs):
    """Plot every entry of `metrics` and save each figure to disk.

    Each key becomes one figure (via `plot_metric`), titled with `title`
    when given, and written under
    ``<QUARTO_OUTPUTS_DIR>/plots-4dSU3/<title>`` by `savefig`.
    Extra keyword args are forwarded to `plot_metric`.
    """
    from l2hmc.utils.rich import is_interactive
    from l2hmc.configs import QUARTO_OUTPUTS_DIR
    outdir = Path(f"{QUARTO_OUTPUTS_DIR}/plots-4dSU3/{title}")
    outdir.mkdir(exist_ok=True, parents=True)
    for metric_name, metric_value in metrics.items():
        fig, ax = plot_metric(metric_value, name=metric_name, **kwargs)
        if title is not None:
            ax.set_title(title)
        log.info(f"Saving {metric_name} to {outdir}")
        savefig(fig, f"{metric_name}", outdir=outdir)
        # Only call plt.show() outside interactive (notebook) sessions,
        # where figures are not rendered automatically.
        if not is_interactive():
            plt.show()


def plot_metric(
        metric: torch.Tensor,
        name: Optional[str] = None,
        **kwargs,
):
    """Plot a single metric history, dispatching on its element shape.

    `metric` is a non-empty sequence of per-step values (scalars, tensors,
    or arrays).  Scalars / 0-d elements go to `plot_scalar`, 1-d elements
    to `plot_chains`, 2-d elements to `plot_leapfrogs`.

    Args:
        metric: Sequence of per-step values (all elements the same shape).
        name: Label used for the y-axis.
        **kwargs: Forwarded to the underlying plot helper
            (not passed to `plot_leapfrogs`, matching its signature).

    Returns:
        The ``(fig, ax)`` pair from the chosen plot helper.

    Raises:
        ValueError: If the element shape has more than two dimensions.
    """
    assert len(metric) > 0
    if isinstance(metric[0], (int, float, bool, np.floating)):
        y = np.stack(metric)
        return plot_scalar(y, ylabel=name, **kwargs)

    def _stack(m) -> np.ndarray:
        # Fixed: the original tested `isinstance(metric, torch.Tensor)`
        # (the container), which is False for the usual list-of-tensors
        # input, routing tensors through np.stack — that fails for CUDA
        # or grad-tracking tensors.  Check the element type instead and
        # detach via grab_tensor.
        if isinstance(m[0], torch.Tensor):
            return grab_tensor(torch.stack(list(m)))
        return np.stack(m)

    element_shape = metric[0].shape
    if len(element_shape) == 2:
        return plot_leapfrogs(_stack(metric), ylabel=name)
    if len(element_shape) == 1:
        return plot_chains(_stack(metric), ylabel=name, **kwargs)
    if len(element_shape) == 0:
        return plot_scalar(_stack(metric), ylabel=name, **kwargs)
    raise ValueError(f'Unexpected element shape: {element_shape}')
def main():
    """Run the full 4D SU(3) demo: HMC eval, network eval, then training.

    Loads the minimal CPU config (`su3-min-cpu.yaml`), builds an
    Experiment, evaluates 10 steps of plain HMC and 10 steps of the
    (untrained) learned sampler, then takes 20 training steps,
    plotting/saving metrics after each phase.

    Returns:
        Tuple of (final flattened configuration `x`, training-metrics
        history dict mapping metric name -> list of per-step values).
    """
    from l2hmc.experiment.pytorch.experiment import train_step
    set_plot_style()

    from l2hmc.configs import CONF_DIR
    su3conf = Path(CONF_DIR).joinpath('su3-min-cpu.yaml')
    assert su3conf.is_file()
    # su3conf = Path('su3-min-cpu.yaml')
    with su3conf.open('r') as stream:
        conf = dict(yaml.safe_load(stream))

    log.info(conf)
    # Flatten the nested config dict into hydra-style "a.b=c" overrides.
    overrides = dict_to_list_of_overrides(conf)
    ptExpSU3 = get_experiment(overrides=[*overrides], build_networks=True)
    # Random initial gauge configuration at beta = 6.0.
    state = ptExpSU3.trainer.dynamics.random_state(6.0)
    assert isinstance(state.x, torch.Tensor)
    assert isinstance(state.beta, torch.Tensor)
    assert isinstance(ptExpSU3, Experiment)
    # --- Phase 1: plain HMC baseline (fixed eps, single leapfrog step) ---
    xhmc, history_hmc = evaluate(
        nsteps=10,
        exp=ptExpSU3,
        beta=state.beta,
        x=state.x,
        eps=0.1,
        nleapfrog=1,
        job_type='hmc',
        nlog=1,
        nprint=2,
        grab=True
    )
    xhmc = ptExpSU3.trainer.dynamics.unflatten(xhmc)
    # checkSU reports deviation from unitarity — should be ~machine eps.
    log.info(f"checkSU(x_hmc): {g.checkSU(xhmc)}")
    plot_metrics(history_hmc.history, title='HMC', marker='.')
    # ptExpSU3.trainer.dynamics.init_weights(
    #     method='uniform',
    #     min=-1e-16,
    #     max=1e-16,
    #     bias=True,
    #     # xeps=0.001,
    #     # veps=0.001,
    # )
    # --- Phase 2: evaluate the (untrained) learned sampler ---
    xeval, history_eval = evaluate(
        nsteps=10,
        exp=ptExpSU3,
        beta=6.0,
        x=state.x,
        job_type='eval',
        nlog=1,
        nprint=2,
        grab=True,
    )
    xeval = ptExpSU3.trainer.dynamics.unflatten(xeval)
    log.info(f"checkSU(x_eval): {g.checkSU(xeval)}")
    plot_metrics(history_eval.history, title='Evaluate', marker='.')

    # --- Phase 3: 20 training steps, accumulating metrics by key ---
    history = {}
    x = state.x
    for step in range(20):
        log.info(f'TRAIN STEP: {step}')
        x, metrics = ptExpSU3.trainer.train_step((x, state.beta))
        # Print full metric dict every other step (skipping step 0).
        if (step > 0 and step % 2 == 0):
            print_dict(metrics, grab=True)
        # Record every step after the first into the history dict.
        if (step > 0 and step % 1 == 0):
            for key, val in metrics.items():
                try:
                    history[key].append(val)
                except KeyError:
                    history[key] = [val]

    x = ptExpSU3.trainer.dynamics.unflatten(x)
    log.info(f"checkSU(x_train): {g.checkSU(x)}")
    plot_metrics(history, title='train', marker='.')
    #
    # for step in range(20):
    #     log.info(f"train step: {step}")
    #     x, metrics = ptExpSU3.trainer.train_step((x, state.beta))
    #     if step % 5 == 0:
    #         print_dict(metrics, grab=True)

    return x, history
# main()
# Notebook cell: load the minimal SU(3) CPU config and build the experiment.
from l2hmc.experiment.pytorch.experiment import train_step
set_plot_style()

from l2hmc.configs import CONF_DIR
su3conf = Path(CONF_DIR).joinpath('su3-min-cpu.yaml')
assert su3conf.is_file()
# su3conf = Path('./conf/su3-min-cpu.yaml')
with su3conf.open('r') as stream:
    conf = dict(yaml.safe_load(stream))

log.info(conf)
# Flatten the nested config dict into hydra-style "a.b=c" override strings.
overrides = dict_to_list_of_overrides(conf)
ptExpSU3 = get_experiment(overrides=[*overrides], build_networks=True)
[10/04/23 08:06:54][INFO][2091541093.py:12] - {'annealing_schedule': {'beta_final': 6.0, 'beta_init': 6.0}, 'backend': 'DDP', 'conv': 'none', 'dynamics': {'eps': 0.1, 'eps_fixed': True, 'group': 'SU3', 'latvolume': [1, 1, 1, 1], 'nchains': 4, 'nleapfrog': 1, 'use_separate_networks': False, 'use_split_xnets': False, 'verbose': True}, 'framework': 'pytorch', 'init_aim': False, 'init_wandb': False, 'learning_rate': {'clip_norm': 1.0, 'lr_init': 1e-05}, 'loss': {'charge_weight': 0.0, 'plaq_weight': 0.0, 'rmse_weight': 1.0, 'use_mixed_loss': False}, 'net_weights': {'v': {'q': 1.0, 's': 1.0, 't': 1.0}, 'x': {'q': 0.0, 's': 0.0, 't': 0.0}}, 'network': {'activation_fn': 'tanh', 'dropout_prob': 0.0, 'units': [1], 'use_batch_norm': False}, 'restore': False, 'save': False, 'steps': {'log': 1, 'nepoch': 10, 'nera': 1, 'print': 1, 'test': 50}, 'use_tb': False, 'use_wandb': False}
[10/04/23 08:06:54][INFO][experiment.py:251] - Creating outputs/2023-10-04-080654/pytorch/train
[10/04/23 08:06:54][INFO][experiment.py:251] - Creating outputs/2023-10-04-080654/pytorch/eval
[10/04/23 08:06:54][INFO][experiment.py:251] - Creating outputs/2023-10-04-080654/pytorch/hmc
[10/04/23 08:06:54][INFO][dist.py:226] - Caught MASTER_PORT:5433 from environment!
[10/04/23 08:06:54][INFO][dist.py:226] - Caught MASTER_PORT:5433 from environment!
[10/04/23 08:06:54][WARNING][trainer.py:437] - Using `torch.optim.Adam` optimizer
[10/04/23 08:06:54][INFO][trainer.py:284] - num_params in model: 788
[10/04/23 08:06:54][WARNING][trainer.py:250] - logging with freq 1 for wandb.watch
# Notebook cell: draw a random state at beta=6.0 and run an HMC baseline.
state = ptExpSU3.trainer.dynamics.random_state(6.0)
assert isinstance(state.x, torch.Tensor)
assert isinstance(state.beta, torch.Tensor)
assert isinstance(ptExpSU3, Experiment)
# 10 steps of plain HMC with fixed step size and a single leapfrog step.
xhmc, history_hmc = evaluate(
    nsteps=10,
    exp=ptExpSU3,
    beta=state.beta,
    x=state.x,
    eps=0.1,
    nleapfrog=1,
    job_type='hmc',
    nlog=1,
    nprint=2,
    grab=True
)
xhmc = ptExpSU3.trainer.dynamics.unflatten(xhmc)
# checkSU reports deviation from unitarity — should be ~machine eps for HMC.
log.info(f"checkSU(x_hmc): {g.checkSU(xhmc)}")
plot_metrics(history_hmc.history, title='HMC', marker='.')
[10/04/23 08:06:54][INFO][experiment.py:117] - Running 10 steps of hmc at beta=6.0000
[10/04/23 08:06:54][INFO][experiment.py:121] - STEP: 0
[10/04/23 08:06:54][INFO][experiment.py:121] - STEP: 1
[10/04/23 08:06:54][INFO][experiment.py:121] - STEP: 2
[10/04/23 08:06:54][INFO][common.py:97] - energy: torch.Size([2, 4]) torch.float64 
[[-10.75791732 -12.34937625  -0.41675933  -1.53564322]
 [-10.75124452 -12.3522306   -0.41267615  -1.54108594]]
logprob: torch.Size([2, 4]) torch.float64 
[[-10.75791732 -12.34937625  -0.41675933  -1.53564322]
 [-10.75124452 -12.3522306   -0.41267615  -1.54108594]]
logdet: torch.Size([2, 4]) torch.float64 
[[0. 0. 0. 0.]
 [0. 0. 0. 0.]]
acc: torch.Size([4]) torch.float64 
[0.99334941 1.         0.99592515 1.        ]
sumlogdet: torch.Size([4]) torch.float64 
[0. 0. 0. 0.]
acc_mask: torch.Size([4]) torch.float32 
[1. 1. 1. 1.]
plaqs: torch.Size([4]) torch.float64 
[0.16450298 0.30335925 0.08750178 0.11000818]
sinQ: torch.Size([4]) torch.float64 
[0.06377054 0.02936343 0.0290893  0.0471427 ]
intQ: torch.Size([4]) torch.float64 
[0.00363448 0.00167351 0.00165789 0.00268681]
dQint: torch.Size([4]) torch.float64 
[0.00063734 0.00026722 0.00153373 0.00079181]
dQsin: torch.Size([4]) torch.float64 
[0.01118282 0.00468872 0.02691084 0.01389309]
loss: None None 
-0.008486187067784774
[10/04/23 08:06:54][INFO][experiment.py:121] - STEP: 3
[10/04/23 08:06:54][INFO][experiment.py:121] - STEP: 4
[10/04/23 08:06:54][INFO][common.py:97] - energy: torch.Size([2, 4]) torch.float64 
[[-11.11456032 -17.6499957   -3.95552998  -6.53463898]
 [-11.13603858 -17.671668    -3.9389084   -6.51679905]]
logprob: torch.Size([2, 4]) torch.float64 
[[-11.11456032 -17.6499957   -3.95552998  -6.53463898]
 [-11.13603858 -17.671668    -3.9389084   -6.51679905]]
logdet: torch.Size([2, 4]) torch.float64 
[[0. 0. 0. 0.]
 [0. 0. 0. 0.]]
acc: torch.Size([4]) torch.float64 
[1.         1.         0.9835158  0.98231826]
sumlogdet: torch.Size([4]) torch.float64 
[0. 0. 0. 0.]
acc_mask: torch.Size([4]) torch.float32 
[1. 1. 1. 1.]
plaqs: torch.Size([4]) torch.float64 
[0.25275746 0.399866   0.08094436 0.14319661]
sinQ: torch.Size([4]) torch.float64 
[0.06062536 0.00580304 0.05690804 0.03510997]
intQ: torch.Size([4]) torch.float64 
[0.00345523 0.00033073 0.00324337 0.00200103]
dQint: torch.Size([4]) torch.float64 
[6.94769306e-04 1.54302575e-05 1.45796993e-03 7.05322708e-06]
dQsin: torch.Size([4]) torch.float64 
[0.0121904  0.00027074 0.02558149 0.00012376]
loss: None None 
-0.007962121892379256
[10/04/23 08:06:54][INFO][experiment.py:121] - STEP: 5
[10/04/23 08:06:54][INFO][experiment.py:121] - STEP: 6
[10/04/23 08:06:54][INFO][common.py:97] - energy: torch.Size([2, 4]) torch.float64 
[[ -7.92357836 -18.88468089  -3.00365154  -4.94505522]
 [ -7.96783894 -18.86501731  -2.98482073  -4.97272838]]
logprob: torch.Size([2, 4]) torch.float64 
[[ -7.92357836 -18.88468089  -3.00365154  -4.94505522]
 [ -7.96783894 -18.86501731  -2.98482073  -4.97272838]]
logdet: torch.Size([2, 4]) torch.float64 
[[0. 0. 0. 0.]
 [0. 0. 0. 0.]]
acc: torch.Size([4]) torch.float64 
[1.         0.98052848 0.98134538 1.        ]
sumlogdet: torch.Size([4]) torch.float64 
[0. 0. 0. 0.]
acc_mask: torch.Size([4]) torch.float32 
[1. 1. 1. 1.]
plaqs: torch.Size([4]) torch.float64 
[0.40639896 0.38496604 0.09990116 0.24746026]
sinQ: torch.Size([4]) torch.float64 
[ 0.04504223  0.00155452 -0.00443203  0.02472714]
intQ: torch.Size([4]) torch.float64 
[ 2.56709954e-03  8.85969084e-05 -2.52595510e-04  1.40927794e-03]
dQint: torch.Size([4]) torch.float64 
[7.55078122e-04 4.03034083e-04 2.18911031e-03 8.63609620e-05]
dQsin: torch.Size([4]) torch.float64 
[0.01324857 0.00707162 0.03841005 0.00151529]
loss: None None 
-0.009571223668505026
[10/04/23 08:06:54][INFO][experiment.py:121] - STEP: 7
[10/04/23 08:06:54][INFO][experiment.py:121] - STEP: 8
[10/04/23 08:06:54][INFO][common.py:97] - energy: torch.Size([2, 4]) torch.float64 
[[-13.98883304  -7.11601894  -9.48035637  -3.92143691]
 [-13.99184124  -7.1014651   -9.49438875  -4.11672224]]
logprob: torch.Size([2, 4]) torch.float64 
[[-13.98883304  -7.11601894  -9.48035637  -3.92143691]
 [-13.99184124  -7.1014651   -9.49438875  -4.11672224]]
logdet: torch.Size([2, 4]) torch.float64 
[[0. 0. 0. 0.]
 [0. 0. 0. 0.]]
acc: torch.Size([4]) torch.float64 
[1.         0.98555155 1.         1.        ]
sumlogdet: torch.Size([4]) torch.float64 
[0. 0. 0. 0.]
acc_mask: torch.Size([4]) torch.float32 
[1. 1. 1. 1.]
plaqs: torch.Size([4]) torch.float64 
[0.4385061  0.34539504 0.04537868 0.37245688]
sinQ: torch.Size([4]) torch.float64 
[0.0153556  0.00315862 0.11135176 0.00892928]
intQ: torch.Size([4]) torch.float64 
[0.00087516 0.00018002 0.00634629 0.00050891]
dQint: torch.Size([4]) torch.float64 
[0.00082473 0.00022505 0.00010956 0.00055608]
dQsin: torch.Size([4]) torch.float64 
[0.01447065 0.00394877 0.00192226 0.00975699]
loss: None None 
-0.010424735653495517
[10/04/23 08:06:54][INFO][experiment.py:121] - STEP: 9
[10/04/23 08:06:54][INFO][2419635544.py:14] - checkSU(x_hmc): (tensor[4] f64 x∈[2.562e-16, 4.133e-16] μ=3.159e-16 σ=7.356e-17 [2.562e-16, 2.620e-16, 3.323e-16, 4.133e-16], tensor[4] f64 x∈[3.601e-16, 7.415e-16] μ=5.139e-16 σ=1.696e-16 [3.601e-16, 4.134e-16, 5.404e-16, 7.415e-16])
[10/04/23 08:06:54][INFO][2088423886.py:19] - Saving energy to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[10/04/23 08:06:54][INFO][2088423886.py:19] - Saving logprob to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[10/04/23 08:06:55][INFO][2088423886.py:19] - Saving logdet to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[10/04/23 08:06:55][INFO][2088423886.py:19] - Saving acc to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[10/04/23 08:06:55][INFO][2088423886.py:19] - Saving sumlogdet to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[10/04/23 08:06:55][INFO][2088423886.py:19] - Saving acc_mask to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[10/04/23 08:06:56][INFO][2088423886.py:19] - Saving plaqs to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[10/04/23 08:06:56][INFO][2088423886.py:19] - Saving sinQ to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[10/04/23 08:06:56][INFO][2088423886.py:19] - Saving intQ to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[10/04/23 08:06:56][INFO][2088423886.py:19] - Saving dQint to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[10/04/23 08:06:57][INFO][2088423886.py:19] - Saving dQsin to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC
[10/04/23 08:06:57][INFO][2088423886.py:19] - Saving loss to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/HMC

# Notebook cell: evaluate the (untrained) learned sampler from the same start.
# ptExpSU3.trainer.dynamics.init_weights(
#     method='uniform',
#     min=-1e-16,
#     max=1e-16,
#     bias=True,
#     # xeps=0.001,
#     # veps=0.001,
# )
xeval, history_eval = evaluate(
    nsteps=10,
    exp=ptExpSU3,
    beta=6.0,
    x=state.x,
    job_type='eval',
    nlog=1,
    nprint=2,
    grab=True,
)
xeval = ptExpSU3.trainer.dynamics.unflatten(xeval)
log.info(f"checkSU(x_eval): {g.checkSU(xeval)}")
plot_metrics(history_eval.history, title='Evaluate', marker='.')
[10/04/23 08:06:58][INFO][experiment.py:117] - Running 10 steps of eval at beta=6.0000
[10/04/23 08:06:58][INFO][experiment.py:121] - STEP: 0
[10/04/23 08:06:59][INFO][experiment.py:121] - STEP: 1
[10/04/23 08:06:59][INFO][experiment.py:121] - STEP: 2
[10/04/23 08:06:59][INFO][common.py:97] - energy: torch.Size([3, 4]) torch.float64 
[[ -9.93909722 -11.71406924 -13.98390472  -6.91867768]
 [ -9.64725014 -11.76741361 -13.84813463  -6.57255943]
 [ -9.24332693 -11.64892261 -13.56752346  -6.14914933]]
logprob: torch.Size([3, 4]) torch.float64 
[[ -9.93909722 -11.71406924 -13.98390472  -6.91867768]
 [ -9.57527769 -11.61717321 -13.6004816   -6.52241563]
 [ -9.17429537 -11.68892811 -13.56843895  -6.15131339]]
logdet: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.07197245 -0.1502404  -0.24765303 -0.0501438 ]
 [-0.06903157  0.0400055   0.00091548  0.00216406]]
sldf: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.07197245 -0.1502404  -0.24765303 -0.0501438 ]
 [ 0.          0.          0.          0.        ]]
sldb: torch.Size([3, 4]) torch.float64 
[[0.         0.         0.         0.        ]
 [0.         0.         0.         0.        ]
 [0.00294088 0.1902459  0.24856851 0.05230786]]
sld: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.07197245 -0.1502404  -0.24765303 -0.0501438 ]
 [-0.06903157  0.0400055   0.00091548  0.00216406]]
xeps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
veps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
acc: torch.Size([4]) torch.float64 
[0.46542615 0.97517227 0.66003278 0.46423505]
sumlogdet: torch.Size([4]) torch.float64 
[-0.06903157  0.0400055   0.00091548  0.00216406]
beta: torch.Size([]) torch.float64 
6.0
acc_mask: torch.Size([4]) torch.float32 
[1. 1. 1. 1.]
plaqs: torch.Size([4]) torch.float64 
[0.16767915 0.37381953 0.37383779 0.20119684]
sinQ: torch.Size([4]) torch.float64 
[ 0.04301257  0.01916423  0.01386744 -0.01006018]
intQ: torch.Size([4]) torch.float64 
[ 0.00245142  0.00109223  0.00079035 -0.00057336]
dQint: torch.Size([4]) torch.float64 
[0.00119233 0.00094291 0.00155372 0.00095932]
dQsin: torch.Size([4]) torch.float64 
[0.0209205  0.01654419 0.02726158 0.01683214]
loss: None None 
-0.020710585677662853
[10/04/23 08:06:59][INFO][experiment.py:121] - STEP: 3
[10/04/23 08:06:59][INFO][experiment.py:121] - STEP: 4
[10/04/23 08:06:59][INFO][common.py:97] - energy: torch.Size([3, 4]) torch.float64 
[[-12.85390515 -19.69124648 -23.34418961 -11.83338904]
 [-12.61930827 -19.83006702 -23.39287265 -12.32972948]
 [-12.22362162 -18.93555772 -22.24709415 -12.09553454]]
logprob: torch.Size([3, 4]) torch.float64 
[[-12.85390515 -19.69124648 -23.34418961 -11.83338904]
 [-12.65387143 -19.71055885 -23.17380781 -12.2815458 ]
 [-12.22896701 -18.91997701 -22.23396345 -12.07717473]]
logdet: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.03456316 -0.11950817 -0.21906484 -0.04818368]
 [ 0.00534539 -0.01558072 -0.01313071 -0.01835981]]
sldf: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.03456316 -0.11950817 -0.21906484 -0.04818368]
 [ 0.          0.          0.          0.        ]]
sldb: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.          0.          0.          0.        ]
 [-0.02921778  0.10392745  0.20593414  0.02982387]]
sld: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.03456316 -0.11950817 -0.21906484 -0.04818368]
 [ 0.00534539 -0.01558072 -0.01313071 -0.01835981]]
xeps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
veps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
acc: torch.Size([4]) torch.float64 
[0.53529454 0.46242566 0.32948444 1.        ]
sumlogdet: torch.Size([4]) torch.float64 
[ 0.00534539 -0.         -0.         -0.01835981]
beta: torch.Size([]) torch.float64 
6.0
acc_mask: torch.Size([4]) torch.float32 
[1. 0. 0. 1.]
plaqs: torch.Size([4]) torch.float64 
[0.30234966 0.55960731 0.69219294 0.31462515]
sinQ: torch.Size([4]) torch.float64 
[0.02246026 0.01034247 0.01407288 0.00475271]
intQ: torch.Size([4]) torch.float64 
[0.00128008 0.00058945 0.00080206 0.00027087]
dQint: torch.Size([4]) torch.float64 
[0.0017964  0.         0.         0.00236045]
dQsin: torch.Size([4]) torch.float64 
[0.03151961 0.         0.         0.04141642]
loss: None None 
-0.015773998192892276
[10/04/23 08:06:59][INFO][experiment.py:121] - STEP: 5
[10/04/23 08:06:59][INFO][experiment.py:121] - STEP: 6
[10/04/23 08:06:59][INFO][common.py:97] - energy: torch.Size([3, 4]) torch.float64 
[[-19.55645935 -20.86462233 -20.90289315 -11.80938497]
 [-19.27554945 -20.60377855 -20.79816694 -11.77715066]
 [-18.96590202 -20.34010565 -20.641842   -11.43213333]]
logprob: torch.Size([3, 4]) torch.float64 
[[-19.55645935 -20.86462233 -20.90289315 -11.80938497]
 [-19.31960696 -20.45738215 -20.56264778 -11.72755736]
 [-18.97671809 -20.35406438 -20.61748015 -11.4626642 ]]
logdet: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.04405751 -0.14639641 -0.23551916 -0.0495933 ]
 [ 0.01081607  0.01395873 -0.02436185  0.03053087]]
sldf: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.04405751 -0.14639641 -0.23551916 -0.0495933 ]
 [ 0.          0.          0.          0.        ]]
sldb: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.          0.          0.          0.        ]
 [-0.03324145  0.16035514  0.21115731  0.08012417]]
sld: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.04405751 -0.14639641 -0.23551916 -0.0495933 ]
 [ 0.01081607  0.01395873 -0.02436185  0.03053087]]
xeps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
veps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
acc: torch.Size([4]) torch.float64 
[0.56004325 0.60016062 0.75170374 0.70700272]
sumlogdet: torch.Size([4]) torch.float64 
[ 0.01081607  0.01395873 -0.02436185  0.03053087]
beta: torch.Size([]) torch.float64 
6.0
acc_mask: torch.Size([4]) torch.float32 
[1. 1. 1. 1.]
plaqs: torch.Size([4]) torch.float64 
[0.50284475 0.55960731 0.58704547 0.23919369]
sinQ: torch.Size([4]) torch.float64 
[-0.01053072  0.01034247  0.01918416 -0.03898123]
intQ: torch.Size([4]) torch.float64 
[-0.00060018  0.00058945  0.00109337 -0.00222166]
dQint: torch.Size([4]) torch.float64 
[0.00135638 0.00120177 0.00227558 0.00031006]
dQsin: torch.Size([4]) torch.float64 
[0.02379895 0.02108615 0.03992728 0.00544025]
loss: None None 
-0.016508422822688693
[10/04/23 08:06:59][INFO][experiment.py:121] - STEP: 7
[10/04/23 08:06:59][INFO][experiment.py:121] - STEP: 8
[10/04/23 08:06:59][INFO][common.py:97] - energy: torch.Size([3, 4]) torch.float64 
[[-18.07899101 -28.71248658 -13.48019371 -14.59335827]
 [-17.76685303 -28.30437183 -12.76586787 -14.82013678]
 [-17.05030815 -28.78619363 -11.86821313 -14.6282822 ]]
logprob: torch.Size([3, 4]) torch.float64 
[[-18.07899101 -28.71248658 -13.48019371 -14.59335827]
 [-17.86336929 -28.37337191 -12.58082071 -14.75738913]
 [-17.21060337 -28.90503573 -11.8680323  -14.61315147]]
logdet: torch.Size([3, 4]) torch.float64 
[[ 0.00000000e+00  0.00000000e+00  0.00000000e+00  0.00000000e+00]
 [ 9.65162556e-02  6.90000737e-02 -1.85047158e-01 -6.27476486e-02]
 [ 1.60295223e-01  1.18842098e-01 -1.80827316e-04 -1.51307243e-02]]
sldf: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.09651626  0.06900007 -0.18504716 -0.06274765]
 [ 0.          0.          0.          0.        ]]
sldb: torch.Size([3, 4]) torch.float64 
[[0.         0.         0.         0.        ]
 [0.         0.         0.         0.        ]
 [0.06377897 0.04984202 0.18486633 0.04761692]]
sld: torch.Size([3, 4]) torch.float64 
[[ 0.00000000e+00  0.00000000e+00  0.00000000e+00  0.00000000e+00]
 [ 9.65162556e-02  6.90000737e-02 -1.85047158e-01 -6.27476486e-02]
 [ 1.60295223e-01  1.18842098e-01 -1.80827316e-04 -1.51307243e-02]]
xeps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
veps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
acc: torch.Size([4]) torch.float64 
[0.41962759 1.         0.19945604 1.        ]
sumlogdet: torch.Size([4]) torch.float64 
[ 0.16029522  0.1188421  -0.         -0.01513072]
beta: torch.Size([]) torch.float64 
6.0
acc_mask: torch.Size([4]) torch.float32 
[1. 1. 0. 1.]
plaqs: torch.Size([4]) torch.float64 
[0.62270466 0.74549143 0.53797573 0.24602781]
sinQ: torch.Size([4]) torch.float64 
[ 0.01834668  0.00603678 -0.01653119 -0.05096747]
intQ: torch.Size([4]) torch.float64 
[ 0.00104564  0.00034406 -0.00094216 -0.0029048 ]
dQint: torch.Size([4]) torch.float64 
[0.00191687 0.00024381 0.         0.00046954]
dQsin: torch.Size([4]) torch.float64 
[0.03363338 0.0042779  0.         0.00823858]
loss: None None 
-0.01735800592796972
[10/04/23 08:06:59][INFO][experiment.py:121] - STEP: 9
[10/04/23 08:06:59][INFO][1629827420.py:20] - checkSU(x_eval): (tensor[4] f64 x∈[9.577e-14, 0.024] μ=0.009 σ=0.012 [0.024, 2.063e-13, 0.014, 9.577e-14], tensor[4] f64 x∈[1.790e-13, 0.034] μ=0.013 σ=0.016 [0.034, 3.935e-13, 0.016, 1.790e-13])
[10/04/23 08:06:59][INFO][2088423886.py:19] - Saving energy to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:06:59][INFO][2088423886.py:19] - Saving logprob to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:06:59][INFO][2088423886.py:19] - Saving logdet to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:06:59][INFO][2088423886.py:19] - Saving sldf to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:07:00][INFO][2088423886.py:19] - Saving sldb to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:07:00][INFO][2088423886.py:19] - Saving sld to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:07:00][INFO][2088423886.py:19] - Saving xeps to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:07:01][INFO][2088423886.py:19] - Saving veps to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:07:01][INFO][2088423886.py:19] - Saving acc to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:07:01][INFO][2088423886.py:19] - Saving sumlogdet to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:07:01][INFO][2088423886.py:19] - Saving beta to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:07:01][INFO][2088423886.py:19] - Saving acc_mask to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:07:02][INFO][2088423886.py:19] - Saving plaqs to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:07:02][INFO][2088423886.py:19] - Saving sinQ to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:07:02][INFO][2088423886.py:19] - Saving intQ to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:07:02][INFO][2088423886.py:19] - Saving dQint to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:07:03][INFO][2088423886.py:19] - Saving dQsin to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate
[10/04/23 08:07:03][INFO][2088423886.py:19] - Saving loss to /Users/samforeman/projects/saforem2/l2hmc-qcd/qmd/outputs/plots-4dSU3/Evaluate

# Notebook cell: 20 training steps, accumulating per-step metrics by key.
history = {}
x = state.x
for step in range(20):
    log.info(f'TRAIN STEP: {step}')
    x, metrics = ptExpSU3.trainer.train_step((x, state.beta))
    # Print the full metric dict every other step (skipping step 0).
    if (step > 0 and step % 2 == 0):
        print_dict(metrics, grab=True)
    # Record every step after the first into the history dict.
    if (step > 0 and step % 1 == 0):
        for key, val in metrics.items():
            try:
                history[key].append(val)
            except KeyError:
                history[key] = [val]
[10/04/23 08:07:05][INFO][2642635469.py:4] - TRAIN STEP: 0
[10/04/23 08:07:05][INFO][2642635469.py:4] - TRAIN STEP: 1
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 2
[10/04/23 08:07:06][INFO][common.py:97] - energy: torch.Size([3, 4]) torch.float64 
[[-20.11355156 -13.83759796  -4.51987377 -11.45297864]
 [-19.63407248 -12.82094906  -3.89140218 -11.38944608]
 [-18.91054695 -12.57901193  -3.60837652 -10.80563673]]
logprob: torch.Size([3, 4]) torch.float64 
[[-20.11355156 -13.83759796  -4.51987377 -11.45297864]
 [-19.5349711  -12.66848527  -3.6937792  -11.31223158]
 [-18.98136385 -12.5501776   -3.58081924 -10.80513544]]
logdet: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.09910138 -0.15246379 -0.19762298 -0.0772145 ]
 [ 0.07081689 -0.02883433 -0.02755728 -0.00050128]]
sldf: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.09910138 -0.15246379 -0.19762298 -0.0772145 ]
 [ 0.          0.          0.          0.        ]]
sldb: torch.Size([3, 4]) torch.float64 
[[0.         0.         0.         0.        ]
 [0.         0.         0.         0.        ]
 [0.16991827 0.12362946 0.17006571 0.07671321]]
sld: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.09910138 -0.15246379 -0.19762298 -0.0772145 ]
 [ 0.07081689 -0.02883433 -0.02755728 -0.00050128]]
xeps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
veps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
acc: torch.Size([4]) torch.float64 
[0.32232732 0.2759818  0.39099734 0.52317294]
sumlogdet: torch.Size([4]) torch.float64 
[ 0.07081689 -0.         -0.02755728 -0.00050128]
beta: torch.Size([]) torch.float64 
6.0
acc_mask: torch.Size([4]) torch.float32 
[1. 0. 1. 1.]
loss: None None 
-0.009001684302756496
plaqs: torch.Size([4]) torch.float64 
[0.36183972 0.43746352 0.02919545 0.2244051 ]
sinQ: torch.Size([4]) torch.float64 
[ 0.01845919 -0.00937014  0.00653818  0.03579483]
intQ: torch.Size([4]) torch.float64 
[ 0.00105205 -0.00053403  0.00037263  0.00204006]
dQint: torch.Size([4]) torch.float64 
[0.00068181 0.         0.00033711 0.001986  ]
dQsin: torch.Size([4]) torch.float64 
[0.01196306 0.         0.00591496 0.0348463 ]
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 3
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 4
[10/04/23 08:07:06][INFO][common.py:97] - energy: torch.Size([3, 4]) torch.float64 
[[-16.99217625 -18.05099071 -10.04985909  -7.1340411 ]
 [-16.61047626 -17.84035555  -9.68866118  -7.14912011]
 [-16.28336399 -18.36191548  -9.14330041  -5.86613539]]
logprob: torch.Size([3, 4]) torch.float64 
[[-16.99217625 -18.05099071 -10.04985909  -7.1340411 ]
 [-16.4708106  -17.6654387   -9.64363079  -7.10061592]
 [-16.17915148 -18.35588077  -9.12314899  -5.81814653]]
logdet: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.13966566 -0.17491685 -0.04503039 -0.04850419]
 [-0.10421251 -0.0060347  -0.02015142 -0.04798886]]
sldf: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.13966566 -0.17491685 -0.04503039 -0.04850419]
 [ 0.          0.          0.          0.        ]]
sldb: torch.Size([3, 4]) torch.float64 
[[0.         0.         0.         0.        ]
 [0.         0.         0.         0.        ]
 [0.03545315 0.16888215 0.02487897 0.00051533]]
sld: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.13966566 -0.17491685 -0.04503039 -0.04850419]
 [-0.10421251 -0.0060347  -0.02015142 -0.04798886]]
xeps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
veps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
acc: torch.Size([4]) torch.float64 
[0.44351451 1.         0.39585389 0.26823426]
sumlogdet: torch.Size([4]) torch.float64 
[-0.10421251 -0.0060347  -0.         -0.        ]
beta: torch.Size([]) torch.float64 
6.0
acc_mask: torch.Size([4]) torch.float32 
[1. 1. 0. 0.]
loss: None None 
-0.014134476436103167
plaqs: torch.Size([4]) torch.float64 
[0.46996628 0.45118214 0.18011337 0.30434348]
sinQ: torch.Size([4]) torch.float64 
[ 0.03170589 -0.01147666  0.05118318 -0.00268528]
intQ: torch.Size([4]) torch.float64 
[ 0.00180702 -0.00065409  0.00291709 -0.00015304]
dQint: torch.Size([4]) torch.float64 
[0.00065855 0.00207145 0.         0.        ]
dQsin: torch.Size([4]) torch.float64 
[0.01155495 0.03634551 0.         0.        ]
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 5
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 6
[10/04/23 08:07:06][INFO][common.py:97] - energy: torch.Size([3, 4]) torch.float64 
[[-22.17358114 -18.47427308  -2.95308721 -11.62559496]
 [-21.83570903 -18.22818944  -2.4429929  -11.20037746]
 [-21.81748065 -18.37604145  -2.70650275 -10.07465326]]
logprob: torch.Size([3, 4]) torch.float64 
[[-22.17358114 -18.47427308  -2.95308721 -11.62559496]
 [-21.84417608 -18.06424191  -2.60692839 -11.12021881]
 [-21.89172921 -18.39051785  -2.62673381 -10.08631078]]
logdet: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.00846706 -0.16394753  0.16393549 -0.08015865]
 [ 0.07424856  0.0144764  -0.07976894  0.01165751]]
sldf: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.00846706 -0.16394753  0.16393549 -0.08015865]
 [ 0.          0.          0.          0.        ]]
sldb: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.          0.          0.          0.        ]
 [ 0.0657815   0.17842393 -0.24370443  0.09181616]]
sld: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.00846706 -0.16394753  0.16393549 -0.08015865]
 [ 0.07424856  0.0144764  -0.07976894  0.01165751]]
xeps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
veps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
acc: torch.Size([4]) torch.float64 
[0.75438538 0.91965634 0.72155015 0.21453462]
sumlogdet: torch.Size([4]) torch.float64 
[ 0.07424856  0.0144764  -0.07976894  0.01165751]
beta: torch.Size([]) torch.float64 
6.0
acc_mask: torch.Size([4]) torch.float32 
[1. 1. 1. 1.]
loss: None None 
-0.019461443124629982
plaqs: torch.Size([4]) torch.float64 
[0.43455592 0.53410535 0.15486684 0.30434348]
sinQ: torch.Size([4]) torch.float64 
[-0.01746029  0.01216864  0.01077427 -0.00268528]
intQ: torch.Size([4]) torch.float64 
[-0.00099512  0.00069353  0.00061406 -0.00015304]
dQint: torch.Size([4]) torch.float64 
[0.0016496  0.00074394 0.00087095 0.0014133 ]
dQsin: torch.Size([4]) torch.float64 
[0.02894377 0.01305311 0.01528163 0.02479769]
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 7
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 8
[10/04/23 08:07:06][INFO][common.py:97] - energy: torch.Size([3, 4]) torch.float64 
[[-28.94977982 -23.93414904  -5.02251262 -20.14381224]
 [-28.26856486 -24.03288661  -4.19892196 -20.37397293]
 [-27.78459946 -23.56989642  -3.56347647 -20.06837893]]
logprob: torch.Size([3, 4]) torch.float64 
[[-28.94977982 -23.93414904  -5.02251262 -20.14381224]
 [-28.25995613 -23.85544268  -4.41858446 -20.21769226]
 [-27.77851779 -23.59675098  -3.59362754 -20.09340966]]
logdet: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.00860873 -0.17744392  0.2196625  -0.15628067]
 [-0.00608167  0.02685456  0.03015107  0.02503073]]
sldf: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.00860873 -0.17744392  0.2196625  -0.15628067]
 [ 0.          0.          0.          0.        ]]
sldb: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.          0.          0.          0.        ]
 [ 0.00252705  0.20429848 -0.18951143  0.1813114 ]]
sld: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.00860873 -0.17744392  0.2196625  -0.15628067]
 [-0.00608167  0.02685456  0.03015107  0.02503073]]
xeps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
veps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
acc: torch.Size([4]) torch.float64 
[0.3099755  0.71362471 0.23957588 0.95084655]
sumlogdet: torch.Size([4]) torch.float64 
[-0.          0.          0.          0.02503073]
beta: torch.Size([]) torch.float64 
6.0
acc_mask: torch.Size([4]) torch.float32 
[0. 0. 0. 1.]
loss: None None 
-0.015584381253787876
plaqs: torch.Size([4]) torch.float64 
[0.7403035  0.65171197 0.31634181 0.47457693]
sinQ: torch.Size([4]) torch.float64 
[-0.00859781 -0.00165773 -0.00396142  0.00092218]
intQ: torch.Size([4]) torch.float64 
[-4.90016318e-04 -9.44793472e-05 -2.25773828e-04  5.25577319e-05]
dQint: torch.Size([4]) torch.float64 
[0.         0.         0.         0.00032025]
dQsin: torch.Size([4]) torch.float64 
[0.         0.         0.         0.00561914]
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 9
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 10
[10/04/23 08:07:06][INFO][common.py:97] - energy: torch.Size([3, 4]) torch.float64 
[[-31.39076793 -27.31985011 -19.54217484 -21.1104468 ]
 [-31.0614939  -27.06503105 -19.07066872 -21.3910664 ]
 [-30.84961158 -26.95602155 -18.69349894 -20.00516103]]
logprob: torch.Size([3, 4]) torch.float64 
[[-31.39076793 -27.31985011 -19.54217484 -21.1104468 ]
 [-31.06645234 -26.87717262 -19.24520362 -21.21648217]
 [-30.82929486 -26.99061806 -18.65210819 -20.00045772]]
logdet: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.00495844 -0.18785843  0.1745349  -0.17458422]
 [-0.02031672  0.03459652 -0.04139075 -0.00470331]]
sldf: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.00495844 -0.18785843  0.1745349  -0.17458422]
 [ 0.          0.          0.          0.        ]]
sldb: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.          0.          0.          0.        ]
 [-0.02527516  0.22245494 -0.21592565  0.16988091]]
sld: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.00495844 -0.18785843  0.1745349  -0.17458422]
 [-0.02031672  0.03459652 -0.04139075 -0.00470331]]
xeps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
veps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
acc: torch.Size([4]) torch.float64 
[0.57036826 0.71947605 0.41062839 0.32956256]
sumlogdet: torch.Size([4]) torch.float64 
[-0.02031672  0.03459652 -0.04139075 -0.        ]
beta: torch.Size([]) torch.float64 
6.0
acc_mask: torch.Size([4]) torch.float32 
[1. 1. 1. 0.]
loss: None None 
-0.011982253415392767
plaqs: torch.Size([4]) torch.float64 
[0.7403035  0.65171197 0.45503328 0.62236101]
sinQ: torch.Size([4]) torch.float64 
[-0.00859781 -0.00165773 -0.00088107  0.01924983]
intQ: torch.Size([4]) torch.float64 
[-4.90016318e-04 -9.44793472e-05 -5.02151834e-05  1.09710862e-03]
dQint: torch.Size([4]) torch.float64 
[0.00043531 0.00061571 0.00043569 0.        ]
dQsin: torch.Size([4]) torch.float64 
[0.00763793 0.01080326 0.00764459 0.        ]
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 11
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 12
[10/04/23 08:07:06][INFO][common.py:97] - energy: torch.Size([3, 4]) torch.float64 
[[-31.42781825 -21.38032064 -19.16557544 -24.26085164]
 [-30.89428362 -21.22321069 -18.90945902 -23.55378881]
 [-30.46921912 -20.86892045 -18.93287679 -23.36450964]]
logprob: torch.Size([3, 4]) torch.float64 
[[-31.42781825 -21.38032064 -19.16557544 -24.26085164]
 [-30.88117485 -21.00988585 -19.11822465 -23.54366138]
 [-30.55010401 -20.84704698 -19.01058406 -23.31072478]]
logdet: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.01310877 -0.21332485  0.20876563 -0.01012743]
 [ 0.08088489 -0.02187348  0.07770727 -0.05378486]]
sldf: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.01310877 -0.21332485  0.20876563 -0.01012743]
 [ 0.          0.          0.          0.        ]]
sldb: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.          0.          0.          0.        ]
 [ 0.09399365  0.19145137 -0.13105835 -0.04365743]]
sld: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.01310877 -0.21332485  0.20876563 -0.01012743]
 [ 0.08088489 -0.02187348  0.07770727 -0.05378486]]
xeps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
veps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
acc: torch.Size([4]) torch.float64 
[0.41573209 0.58668123 0.85642256 0.38669196]
sumlogdet: torch.Size([4]) torch.float64 
[ 0.08088489 -0.          0.07770727 -0.        ]
beta: torch.Size([]) torch.float64 
6.0
acc_mask: torch.Size([4]) torch.float32 
[1. 0. 1. 0.]
loss: None None 
-0.016004372395409854
plaqs: torch.Size([4]) torch.float64 
[0.84037172 0.65154716 0.42313337 0.73863172]
sinQ: torch.Size([4]) torch.float64 
[ 1.47285038e-03 -1.56397921e-03 -6.09823914e-03 -6.72724198e-05]
intQ: torch.Size([4]) torch.float64 
[ 8.39424059e-05 -8.91361261e-05 -3.47557954e-04 -3.83406818e-06]
dQint: torch.Size([4]) torch.float64 
[1.23274406e-05 0.00000000e+00 5.10271910e-04 0.00000000e+00]
dQsin: torch.Size([4]) torch.float64 
[0.0002163  0.         0.00895321 0.        ]
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 13
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 14
[10/04/23 08:07:06][INFO][common.py:97] - energy: torch.Size([3, 4]) torch.float64 
[[-29.59240183 -21.94551736 -28.47707495 -20.59708329]
 [-29.48303373 -21.10556806 -28.05877271 -20.58985545]
 [-29.39854899 -20.73632654 -27.22089443 -19.77844503]]
logprob: torch.Size([3, 4]) torch.float64 
[[-29.59240183 -21.94551736 -28.47707495 -20.59708329]
 [-29.40940421 -20.98154179 -28.17860182 -20.541757  ]
 [-29.31460821 -20.72420952 -27.17276265 -19.77710774]]
logdet: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.07362952 -0.12402627  0.11982911 -0.04809845]
 [-0.08394078 -0.01211702 -0.04813178 -0.00133729]]
sldf: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.07362952 -0.12402627  0.11982911 -0.04809845]
 [ 0.          0.          0.          0.        ]]
sldb: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.          0.          0.          0.        ]
 [-0.01031126  0.11190925 -0.16796089  0.04676117]]
sld: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.07362952 -0.12402627  0.11982911 -0.04809845]
 [-0.08394078 -0.01211702 -0.04813178 -0.00133729]]
xeps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
veps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
acc: torch.Size([4]) torch.float64 
[0.75745313 0.2948443  0.27135909 0.44044242]
sumlogdet: torch.Size([4]) torch.float64 
[-0.08394078 -0.01211702 -0.04813178 -0.00133729]
beta: torch.Size([]) torch.float64 
6.0
acc_mask: torch.Size([4]) torch.float32 
[1. 1. 1. 1.]
loss: None None 
-0.012984717158635528
plaqs: torch.Size([4]) torch.float64 
[0.75491793 0.6547223  0.63304782 0.73863172]
sinQ: torch.Size([4]) torch.float64 
[ 3.85535809e-03 -1.41124889e-02 -1.46423245e-02 -6.72724198e-05]
intQ: torch.Size([4]) torch.float64 
[ 2.19729063e-04 -8.04315419e-04 -8.34512428e-04 -3.83406817e-06]
dQint: torch.Size([4]) torch.float64 
[0.00065824 0.00058615 0.00017334 0.00014198]
dQsin: torch.Size([4]) torch.float64 
[0.01154945 0.01028462 0.00304149 0.00249122]
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 15
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 16
[10/04/23 08:07:06][INFO][common.py:97] - energy: torch.Size([3, 4]) torch.float64 
[[-27.90336074 -24.58447835 -27.70507628 -25.1095963 ]
 [-27.4557147  -23.38939397 -26.86035304 -24.74188789]
 [-26.76730611 -23.12311187 -26.13507215 -24.81611966]]
logprob: torch.Size([3, 4]) torch.float64 
[[-27.90336074 -24.58447835 -27.70507628 -25.1095963 ]
 [-27.42721443 -23.25741435 -27.03208626 -24.69073936]
 [-26.83629231 -23.14150881 -26.14979085 -24.84522864]]
logdet: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.02850027 -0.13197962  0.17173322 -0.05114852]
 [ 0.0689862   0.01839694  0.0147187   0.02910898]]
sldf: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.02850027 -0.13197962  0.17173322 -0.05114852]
 [ 0.          0.          0.          0.        ]]
sldb: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.          0.          0.          0.        ]
 [ 0.09748647  0.15037655 -0.15701452  0.0802575 ]]
sld: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [-0.02850027 -0.13197962  0.17173322 -0.05114852]
 [ 0.0689862   0.01839694  0.0147187   0.02910898]]
xeps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
veps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
acc: torch.Size([4]) torch.float64 
[0.34401554 0.23622524 0.21112911 0.76769124]
sumlogdet: torch.Size([4]) torch.float64 
[0.0689862  0.         0.0147187  0.02910898]
beta: torch.Size([]) torch.float64 
6.0
acc_mask: torch.Size([4]) torch.float32 
[1. 0. 1. 1.]
loss: None None 
-0.011869373548195221
plaqs: torch.Size([4]) torch.float64 
[0.82255532 0.72111137 0.69612456 0.72896623]
sinQ: torch.Size([4]) torch.float64 
[-0.00564947 -0.00444414 -0.01693866  0.00255894]
intQ: torch.Size([4]) torch.float64 
[-0.00032198 -0.00025329 -0.00096539  0.00014584]
dQint: torch.Size([4]) torch.float64 
[8.49293564e-05 0.00000000e+00 3.84992539e-04 3.15784764e-04]
dQsin: torch.Size([4]) torch.float64 
[0.00149017 0.         0.00675506 0.00554075]
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 17
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 18
[10/04/23 08:07:06][INFO][common.py:97] - energy: torch.Size([3, 4]) torch.float64 
[[-28.74207541 -24.8965513  -32.81235279 -22.59228204]
 [-27.98828099 -23.91465789 -32.28912627 -22.61626646]
 [-27.23785728 -24.01606216 -31.51716178 -21.73978358]]
logprob: torch.Size([3, 4]) torch.float64 
[[-28.74207541 -24.8965513  -32.81235279 -22.59228204]
 [-28.07501629 -23.97865766 -32.35996206 -22.61968433]
 [-27.18635944 -23.93497767 -31.62224319 -21.75485085]]
logdet: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.0867353   0.06399977  0.07083579  0.00341787]
 [-0.05149783 -0.08108449  0.10508142  0.01506727]]
sldf: torch.Size([3, 4]) torch.float64 
[[0.         0.         0.         0.        ]
 [0.0867353  0.06399977 0.07083579 0.00341787]
 [0.         0.         0.         0.        ]]
sldb: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.          0.          0.          0.        ]
 [-0.13823313 -0.14508427  0.03424563  0.0116494 ]]
sld: torch.Size([3, 4]) torch.float64 
[[ 0.          0.          0.          0.        ]
 [ 0.0867353   0.06399977  0.07083579  0.00341787]
 [-0.05149783 -0.08108449  0.10508142  0.01506727]]
xeps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
veps: torch.Size([3]) torch.float64 
[0.1 0.1 0.1]
acc: torch.Size([4]) torch.float64 
[0.21103823 0.38229083 0.30418792 0.43282093]
sumlogdet: torch.Size([4]) torch.float64 
[-0.         -0.08108449  0.          0.01506727]
beta: torch.Size([]) torch.float64 
6.0
acc_mask: torch.Size([4]) torch.float32 
[0. 1. 0. 1.]
loss: None None 
-0.00985098143962743
plaqs: torch.Size([4]) torch.float64 
[0.85381281 0.70579842 0.80681173 0.75974167]
sinQ: torch.Size([4]) torch.float64 
[-0.00332841 -0.00489329 -0.01271997  0.00506889]
intQ: torch.Size([4]) torch.float64 
[-0.0001897  -0.00027888 -0.00072495  0.00028889]
dQint: torch.Size([4]) torch.float64 
[0.         0.00045467 0.         0.00140838]
dQsin: torch.Size([4]) torch.float64 
[0.         0.00797764 0.         0.02471136]
[10/04/23 08:07:06][INFO][2642635469.py:4] - TRAIN STEP: 19
# Restore the flattened lattice configuration to its original (unflattened)
# shape before running the unitarity check below.
x = ptExpSU3.trainer.dynamics.unflatten(x)
# Verify the trained links remain (approximately) in SU(3): the logged output
# shows g.checkSU returning a pair of per-chain deviation tensors
# (presumably max/mean deviation from unitarity — confirm against g.checkSU).
log.info(f"checkSU(x_train): {g.checkSU(x)}")
# plot_metrics(history, title='train', marker='.')
[10/04/23 08:07:06][INFO][1630590238.py:2] - checkSU(x_train): (tensor[4] f64 x∈[3.838e-16, 0.018] μ=0.008 σ=0.009 [0.018, 0.014, 3.838e-16, 9.135e-13], tensor[4] f64 x∈[6.307e-16, 0.026] μ=0.011 σ=0.013 [0.026, 0.017, 6.307e-16, 1.818e-12])
# List which observables were recorded during training (energy, logprob,
# logdet, acceptance, plaquettes, topological-charge metrics, ...).
print(history.keys())
dict_keys(['energy', 'logprob', 'logdet', 'sldf', 'sldb', 'sld', 'xeps', 'veps', 'acc', 'sumlogdet', 'beta', 'acc_mask', 'loss', 'plaqs', 'sinQ', 'intQ', 'dQint', 'dQsin'])

Citation

BibTeX citation:
@online{foreman2023,
  author = {Foreman, Sam},
  title = {4D {\$SU(3)\$} {Model}},
  date = {2023-12-06},
  url = {https://saforem2.github.io/l2hmc-qcd/qmd/l2hmc-4DSU3.html},
  langid = {en}
}
For attribution, please cite this work as:
Foreman, Sam. 2023. “4D $SU(3)$ Model.” December 6, 2023. https://saforem2.github.io/l2hmc-qcd/qmd/l2hmc-4DSU3.html.