# --- Training setup: optimizer, LR schedule, loss, and history bookkeeping ---
EPOCHS = 15
opt = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-4)
# Cosine annealing over the full run: LR decays smoothly from 1e-3 toward 0.
sched = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=EPOCHS)
loss_fn = nn.MSELoss()
# Per-epoch history: "tr" = mean train loss, "va" = mean val loss, "r" = val Pearson r.
hist = {"tr": [], "va": [], "r": []}
def pearson(a, b):
    """Pearson correlation between two 1-D tensors.

    Centers both inputs, then divides the dot product by the product of
    norms; the 1e-8 term guards against division by zero for constant input.
    Returns a scalar tensor in [-1, 1].
    """
    a, b = a - a.mean(), b - b.mean()
    return (a * b).sum() / (a.norm() * b.norm() + 1e-8)
print("n" + "="*64)
print(f"{'Epoch':>5} | {'practice':>9} | {'val':>9} | {'val_r':>7}")
print("="*64)
for ep in vary(EPOCHS):
mannequin.practice(); tr = []
for batch in train_loader:
x, y = prep(batch)
loss = loss_fn(mannequin(x), y)
decide.zero_grad(); loss.backward()
torch.nn.utils.clip_grad_norm_(mannequin.parameters(), 1.0)
decide.step(); tr.append(loss.merchandise())
sched.step()
mannequin.eval(); va, P, T = [], [], []
with torch.no_grad():
for batch in val_loader:
x, y = prep(batch); p = mannequin(x)
va.append(loss_fn(p, y).merchandise()); P.append(p.cpu()); T.append(y.cpu())
P, T = torch.cat(P), torch.cat(T)
r = pearson(P, T).merchandise()
hist["tr"].append(np.imply(tr)); hist["va"].append(np.imply(va)); hist["r"].append(r)
print(f"{ep+1:>5d} | {np.imply(tr):>9.4f} | {np.imply(va):>9.4f} | {r:>+7.3f}")
# --- Final evaluation on the held-out test set ---
model.eval(); P, T = [], []
with torch.no_grad():
    for batch in test_loader:
        x, y = prep(batch)
        P.append(model(x).cpu()); T.append(y.cpu())
P, T = torch.cat(P), torch.cat(T)
test_r = pearson(P, T).item()
test_mse = ((P - T) ** 2).mean().item()
print(f"\nTEST | Pearson r = {test_r:+.3f} MSE = {test_mse:.3f}")
print("(Synthetic-MEG signals are random by design — small/zero r is expected.)")
# --- Three-panel summary figure: loss curves, val correlation, test scatter ---
fig, ax = plt.subplots(1, 3, figsize=(15, 4))
ax[0].plot(hist["tr"], label="train"); ax[0].plot(hist["va"], label="val")
ax[0].set(xlabel="Epoch", ylabel="MSE", title="Loss curves"); ax[0].legend(); ax[0].grid(alpha=.3)
ax[1].plot(hist["r"], color="C2"); ax[1].axhline(0, color="k", ls="--", alpha=.4)
ax[1].set(xlabel="Epoch", ylabel="Pearson r", title="Validation correlation"); ax[1].grid(alpha=.3)
# Symmetric axis bound so the y = x identity line spans the scatter plot.
m = float(max(T.abs().max(), P.abs().max()))
ax[2].scatter(T.numpy(), P.numpy(), s=10, alpha=.35)
ax[2].plot([-m, m], [-m, m], "k--", alpha=.4)
ax[2].set(xlabel="True (z-scored char count)", ylabel="Predicted",
          title=f"Test predictions (r = {test_r:+.3f})"); ax[2].grid(alpha=.3)
plt.tight_layout(); plt.show()
print("n✅ Tutorial full!")
print(f" • Examine used : {study_name}")
print(f" • Pipeline : Chain → Segmenter → SegmentDataset → DataLoader")
print(f" • Customized extractor : CharCount (subclass of BaseStatic)")
print(f" • Constructed-in extractor: MegExtractor @ 100 Hz")
print(f" • Mannequin : 1×1 spatial conv + 2 temporal convs + linear head")
