 import tifffile
 import json
 import numpy as np
-
 # import kneed
 from scipy.optimize import curve_fit
 from scipy import stats
 import torch
-from representativity.old import util
+from representativity import util, microlib_statistics
 from torch.nn.functional import interpolate
+from mpl_toolkits.axes_grid1 import make_axes_locatable
 
-with open("data_gen2.json", "r") as fp:
-    data = json.load(fp)["generated_data"]
+# with open("data_gen2.json", "r") as fp:
+#     data = json.load(fp)["generated_data"]
 
-l = len(list(data.keys()))
+# l=len(list(data.keys()))
 # l=3
-c = [(0, 0, 0), (0.5, 0.5, 0.5)]
+c=[(0,0,0), (0.5,0.5,0.5)]
 # plotting = [f'microstructure{f}' for f in [235, 209,205,177]]
-plotting = [f"microstructure{f}" for f in [235, 228, 205, 177]]
+plotting_ims = [f'microstructure{f}' for f in [235, 228, 205, 177]]
 
 # plotting = [k for k in data.keys()]
-l = len(plotting)
+l = len(plotting_ims)
 fig, axs = plt.subplots(l, 3)
-fig.set_size_inches(12, l * 4)
-preds = [[], []]
-irs = [[], []]
-sas = []
-i = 0
-for n in list(data.keys()):
-    if n not in plotting:
+fig.set_size_inches(12, l*3.5)
+preds = [[],[]]
+irs = [[],[]]
+colors = {"cls": 'tab:orange', "stds": 'tab:green'}
+
+all_data, micros, netG, v_names, run_v_names = microlib_statistics.json_preprocessing()
+lens_for_fit = list(range(500, 1000, 20))  # image lengths used to fit the characteristic length scale
+plotting_ims = [micro for micro in micros if micro.split('/')[-1] in plotting_ims]
+# run the statistical analysis on the microlib dataset
+for i, p in enumerate(plotting_ims):
+
+    try:
+        netG.load_state_dict(torch.load(p + "_Gen.pt"))
+    except Exception:  # grayscale images raise here, since their checkpoints have only 1 channel
         continue
-    img = tifffile.imread(f"/home/amir/microlibDataset/{n}/{n}.tif")
-    d = data[n]
-    d1 = data[n]
-
-    csets = [["black", "black"], ["gray", "gray"]]
-    for j, met in enumerate(["vf", "sa"]):
-        cs = csets[j]
-        img_dims = [np.array([int(im_len)] * 2) for im_len in d["ls"]]
-        ns = util.ns_from_dims(img_dims, d[f"ir_{met}"])
-        berns_vf = util.bernouli(d[f"{met}"], ns)
-        axs[i, 1].scatter(
-            d["ls"],
-            d[f"err_exp_{met}"],
-            c=cs[0],
-            s=8,
-            marker="x",
-            label=f"{met} errors from sampling",
-        )
-        axs[i, 1].plot(d["ls"], berns_vf, c=cs[0], label=f"{met} errors from fitted IR")
-        # axs[i, 1].plot(d[f'ls'], d[f'err_model_{met}'], c=cs[0], label = f'{met} errors from bernouli')
-        y = d[f"tpc_{met}"][
-            0
-        ]  # TODO note that in the sa tpc only the first direction is shown, or do something else, maybe normalise the tpc? We can do sum, because that's how we calculate the ir!
-        x = d[f"tpc_{met}_dist"]
-        y = np.array(y)
-
-        # TODO erase this afterwards:
-        if met == "vf":
-            ir = np.round(d[f"ir_vf"], 1)
-            axs[i, 2].plot(x, y, c=cs[1], label=f"Volume fraction 2PC")
-            axs[i, 2].axhline(d["vf"] ** 2, linestyle="dashed", label="$p^2$")
-            axs[i, 2].plot(
-                [0, ir],
-                [d["vf"] ** 2 - 0.02, d["vf"] ** 2 - 0.02],
-                c="green",
-                linewidth=3,
-                label=r"$\tilde{a}_2$",
-            )
-            ticks = [0, int(ir), 20, 40, 60, 80, 100]
-            ticklabels = map(str, ticks)
-            axs[i, 2].set_xticks(ticks)
-            axs[i, 2].set_xticklabels(ticklabels)
-            axs[i, 2].fill_between(
-                x, d["vf"] ** 2, y, alpha=0.5, label=f"Integrated part"
-            )
-            axs[i, 2].legend()
-
-        # axs[i, 2].scatter(x[knee], y_data[knee]/y.max(), c=cs[1], marker='x', label=f'{met} ir from tpc', s=100)
-        ir = d[f"ir_{met}"]
-        pred_ir = util.tpc_to_ir(d[f"tpc_{met}_dist"], d[f"tpc_{met}"])
-        pred_ir = pred_ir * 1.61
-        # axs[i, 2].scatter(x[round(pred_ir)], y[round(pred_ir)], c=cs[1], marker='x', label=f'{met} predicted tpc IR', s=100)
-        # axs[i, 2].scatter(x[round(ir)], y[round(ir)], facecolors='none', edgecolors=cs[1], label=f'{met} fitted IR', s=100)
-
-        irs[j].append(ir)
-    if i == 0:
-        axs[i, 1].legend()
-        axs[i, 2].legend()
-    axs[i, 1].set_xlabel("Image length size [pixels]")
-    axs[i, 1].set_ylabel("Volume fraction percentage error [%]")
-    axs[i, 2].set_ylabel("2-point correlation function")
-    ir = np.round(d[f"ir_vf"], 2)
-    sas.append(d["sa"])
-    im = img[0] * 255
-    si_size, nirs = 160, 5
-    sicrop = int(ir * nirs)
-    print(ir, sicrop)
-    subim = torch.tensor(im[-sicrop:, -sicrop:]).unsqueeze(0).unsqueeze(0).float()
-    subim = interpolate(subim, size=(si_size, si_size), mode="nearest")[0, 0]
-    subim = np.stack([subim] * 3, axis=-1)
-
-    subim[:5, :, :] = 125
-    subim[:, :5, :] = 125
-    # subim[5:20, 5:50, :]
-    subim[10:15, 10 : 10 + si_size // nirs, :] = 0
-    subim[10:15, 10 : 10 + si_size // nirs, 1:-1] = 125
-
-    im = np.stack([im] * 3, axis=-1)
-    im[-si_size:, -si_size:] = subim
-    axs[i, 0].imshow(im)
-    axs[i, 0].set_xticks([])
-    axs[i, 0].set_yticks([])
-    axs[i, 0].set_ylabel(f"M{n[1:]}")
-    axs[i, 0].set_xlabel(
-        f"Volume fraction "
-        + r"$\tilde{a}_2$: "
-        + f"{ir} Inset mag: x{np.round(si_size/sicrop, 2)}"
-    )
-
-    i += 1
+    imsize = 1600
+    lf = imsize//32 + 2  # the size of G's input
+    many_images = util.generate_image(netG, lf=lf, threed=False, reps=10)
+    pf = many_images.mean().cpu().numpy()
+    small_imsize = 512
+    img = many_images[0].detach().cpu().numpy()[:small_imsize, :small_imsize]
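+    # pf estimates the phase fraction as the mean over the 10 generated
+    # repetitions; img keeps a 512x512 crop of the first sample for the
+    # two-point correlation (TPC) contour plot below.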
+
+    csets = [['black', 'black'], ['gray', 'gray']]
+    conf = 0.95
+    errs = util.real_image_stats(many_images, lens_for_fit, pf, conf=conf)
+    sizes_for_fit = [[lf, lf] for lf in lens_for_fit]
+    real_cls = util.fit_cls(errs, sizes_for_fit, pf)
+    stds = errs / stats.norm.interval(conf)[1] * pf / 100
+    std_fit = (real_cls**2/(np.array(lens_for_fit)**2)*pf*(1-pf))**0.5
+    # print(stds)
+    variances = stds**2
+    # from variances to the characteristic length scale, using image size and phase fraction
+    clss = (np.array(lens_for_fit)**2*variances/pf/(1-pf))**0.5
+    print(clss)
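+    # Note: std_fit and clss above are the two directions of the same model,
+    # which treats the image as (L / cls)^2 independent Bernoulli(pf) samples:
+    #     var = cls^2 / L^2 * pf * (1 - pf)
+    # so that cls = L * sqrt(var / (pf * (1 - pf))).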
+
+    axs_twin = axs[i, 1].twinx()
+    stds_scatter = axs_twin.scatter(lens_for_fit, stds, s=8, color=colors["stds"], label='Standard deviations')
+    twin_fit = axs_twin.plot(lens_for_fit, std_fit, color=colors["stds"], label='Standard deviations fit')
+    axs_twin.tick_params(axis='y', labelcolor=colors["stds"])
+    axs_twin.set_ylim(0, 0.025)
+    cls_scatter = axs[i, 1].scatter(lens_for_fit, clss, s=8, color=colors["cls"], label='Characteristic length scales')
+    cls_fit = axs[i, 1].hlines(real_cls, lens_for_fit[0], lens_for_fit[-1], color=colors["cls"], label='Characteristic length scales fit')
+    axs[i, 1].tick_params(axis='y', labelcolor=colors["cls"])
+    axs[i, 1].set_ylim(0, 28)
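+    # The twin y-axis puts the standard deviations (order 0.01) and the
+    # characteristic length scales (tens of pixels) on one panel; the tick
+    # label colours match each series so the two scales can be told apart.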
+
+    dims = len(img.shape)
+    # print(f'starting tpc radial')
+    tpc = util.tpc_radial(img, threed=False)
+    cls = util.tpc_to_cls(tpc, img, img.shape)
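+    # tpc_radial computes the radial two-point correlation of the generated
+    # image, and tpc_to_cls reduces it to a TPC-based characteristic length
+    # scale that can be compared with real_cls fitted from the sampled errors.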
+
+    contour = axs[i, 2].contourf(tpc, cmap='plasma', levels=200)
+    divider = make_axes_locatable(axs[i, 2])
+    cax = divider.append_axes('right', size='5%', pad=0.05)
+    fig.colorbar(contour, cax=cax, orientation='vertical')
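+    # make_axes_locatable carves the colorbar axis out of the contour panel
+    # itself, so adding the colorbar does not shrink this panel relative to
+    # the other columns of the figure.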
+
+    if i == 0:
+        plots = [stds_scatter, twin_fit[0], cls_scatter, cls_fit]
+        label_plots = [plot.get_label() for plot in plots]
+        axs[i, 1].legend(plots, label_plots)
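+    # The legend handles live on two different Axes (the twin and the
+    # original), so they are collected explicitly; a plain axs[i, 1].legend()
+    # would miss the artists drawn on the twin axis.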
+
+    # axs[i,2].legend()
+    # axs[i,1].set_xlabel('Image length size [pixels]')
+    # axs[i,1].set_ylabel('Volume fraction percentage error [%]')
+    # axs[i,2].set_ylabel('2-point correlation function')
+    # ir = np.round(d[f'ir_vf'], 2)
+    # im = img[0]*255
+    # si_size, nirs = 160, 5
+    # sicrop = int(ir*nirs)
+    # print(ir, sicrop)
+    # subim = torch.tensor(im[-sicrop:,-sicrop:]).unsqueeze(0).unsqueeze(0).float()
+    # subim = interpolate(subim, size=(si_size,si_size), mode='nearest')[0,0]
+    # subim = np.stack([subim]*3, axis=-1)
+
+    # subim[:5,:,:] = 125
+    # subim[:,:5,:] = 125
+    # # subim[5:20, 5:50, :]
+    # subim[10:15, 10:10+si_size//nirs, :] = 0
+    # subim[10:15, 10:10+si_size//nirs, 1:-1] = 125
+
+    # im = np.stack([im]*3, axis=-1)
+    # im[-si_size:,-si_size:] = subim
+    # axs[i, 0].imshow(im)
+    # axs[i, 0].set_xticks([])
+    # axs[i, 0].set_yticks([])
+    # axs[i, 0].set_ylabel(f'M{n[1:]}')
+    # axs[i, 0].set_xlabel(f'Volume fraction ' + r'$\tilde{a}_2$: ' + f'{ir} Inset mag: x{np.round(si_size/sicrop, 2)}')
+
 
 
 plt.tight_layout()
-plt.savefig("fig2.pdf", format="pdf")
+plt.savefig('fig2.pdf', format='pdf')
 
 # fig, axs = plt.subplots(1,2)
 # fig.set_size_inches(10,5)
...
 # # res = 100*(np.mean((y-targ)**2/targ))**0.5
 
 # print(res,res2)
+
+
+
+
+