+ for(i=n+1;i>=0;--i) ret[i] = [];
+ }
+ for(i=n;i>=0;--i) ret[i].push(k[i]);
+ ret[n+1].push(Ai);
+ }
+ } else gather(Ai,ret,k);
+ }
+ }
+ if(k.length>n) k.pop();
+ return ret;
+}
+
+// 6. Coordinate matrices
+numeric.cLU = function LU(A) {
+ var I = A[0], J = A[1], V = A[2];
+ var p = I.length, m=0, i,j,k,a,b,c;
+ for(i=0;i<p;i++) if(I[i]>m) m=I[i];
+ m++;
+ var L = Array(m), U = Array(m), left = numeric.rep([m],Infinity), right = numeric.rep([m],-Infinity);
+ var Ui, Uj,alpha;
+ for(k=0;k<p;k++) {
+ i = I[k];
+ j = J[k];
+ if(j<left[i]) left[i] = j;
+ if(j>right[i]) right[i] = j;
+ }
+ for(i=0;i<m-1;i++) { if(right[i] > right[i+1]) right[i+1] = right[i]; }
+ for(i=m-1;i>=1;i--) { if(left[i]<left[i-1]) left[i-1] = left[i]; }
+ // ... (the elimination loop of cLU, its return value, and the forward
+ // substitution of numeric.cLUsolve were lost in extraction; the back
+ // substitution of cLUsolve follows)
+ for(i=m-1;i>=0;i--) {
+ while(Uj[k] > i) {
+ ret[i] -= Uv[k]*ret[Uj[k]];
+ k--;
+ }
+ ret[i] /= Uv[k];
+ k--;
+ }
+ return ret;
+};
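+
+// Usage sketch (illustrative; not part of the original source): factor the
+// 2x2 matrix [[4,1],[2,3]], given in coordinate form [rows, cols, values],
+// and solve with it:
+//
+//   var lu = numeric.cLU([[0,0,1,1],[0,1,0,1],[4,1,2,3]]);
+//   numeric.cLUsolve(lu,[5,5]);   // returns [1,1], since [[4,1],[2,3]].[1,1] = [5,5]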
+
+numeric.cgrid = function grid(n,shape) {
+ if(typeof n === "number") n = [n,n];
+ var ret = numeric.rep(n,-1);
+ var i,j,count;
+ if(typeof shape !== "function") {
+ switch(shape) {
+ case 'L':
+ shape = function(i,j) { return (i>=n[0]/2 || j<n[1]/2); };
+ break;
+ // ... (the remainder of numeric.cgrid and the head of numeric.cdotMV were
+ // lost in extraction)
+ for(k=0;k<p;k++) { if(Ai[k]>N) N = Ai[k]; }
+ N++;
+ ret = numeric.rep([N],0);
+ for(k=0;k<p;k++) { ret[Ai[k]] += Av[k]*x[Aj[k]]; }
+ return ret;
+}
+
+// 7. Splines
+// ... (the numeric.Spline constructor, _at, and the head of at() were lost
+// in extraction; the binary search inside at() follows)
+ while(q-p>1) {
+ mid = floor((p+q)/2);
+ if(x[mid] <= x0) p = mid;
+ else q = mid;
+ }
+ return this._at(x0,p);
+ }
+ var n = x0.length, i, ret = Array(n);
+ for(i=n-1;i!==-1;--i) ret[i] = this.at(x0[i]);
+ return ret;
+}
+numeric.Spline.prototype.diff = function diff() {
+ var x = this.x;
+ var yl = this.yl;
+ var yr = this.yr;
+ var kl = this.kl;
+ var kr = this.kr;
+ var n = yl.length;
+ var i,dx,dy;
+ var zl = kl, zr = kr, pl = Array(n), pr = Array(n);
+ var add = numeric.add, mul = numeric.mul, div = numeric.div, sub = numeric.sub;
+ for(i=n-1;i!==-1;--i) {
+ dx = x[i+1]-x[i];
+ dy = sub(yr[i+1],yl[i]);
+ pl[i] = div(add(mul(dy, 6),mul(kl[i],-4*dx),mul(kr[i+1],-2*dx)),dx*dx);
+ pr[i+1] = div(add(mul(dy,-6),mul(kl[i], 2*dx),mul(kr[i+1], 4*dx)),dx*dx);
+ }
+ return new numeric.Spline(x,zl,zr,pl,pr);
+}
+numeric.Spline.prototype.roots = function roots() {
+ function sqr(x) { return x*x; }
+ function heval(y0,y1,k0,k1,x) {
+ var A = k0*2-(y1-y0);
+ var B = -k1*2+(y1-y0);
+ var t = (x+1)*0.5;
+ var s = t*(1-t);
+ return (1-t)*y0+t*y1+A*s*(1-t)+B*s*t;
+ }
+ var ret = [];
+ var x = this.x, yl = this.yl, yr = this.yr, kl = this.kl, kr = this.kr;
+ if(typeof yl[0] === "number") {
+ yl = [yl];
+ yr = [yr];
+ kl = [kl];
+ kr = [kr];
+ }
+ var m = yl.length,n=x.length-1,i,j,k,y,s,t;
+ var ai,bi,ci,di, ret = Array(m),ri,k0,k1,y0,y1,A,B,D,dx,cx,stops,z0,z1,zm,t0,t1,tm;
+ var sqrt = Math.sqrt;
+ for(i=0;i!==m;++i) {
+ ai = yl[i];
+ bi = yr[i];
+ ci = kl[i];
+ di = kr[i];
+ ri = [];
+ for(j=0;j!==n;j++) {
+ if(j>0 && bi[j]*ai[j]<0) ri.push(x[j]);
+ dx = (x[j+1]-x[j]);
+ cx = x[j];
+ y0 = ai[j];
+ y1 = bi[j+1];
+ k0 = ci[j]/dx;
+ k1 = di[j+1]/dx;
+ D = sqr(k0-k1+3*(y0-y1)) + 12*k1*y0;
+ A = k1+3*y0+2*k0-3*y1;
+ B = 3*(k1+k0+2*(y0-y1));
+ if(D<=0) {
+ z0 = A/B;
+ if(z0>x[j] && z0<x[j+1]) stops = [x[j],z0,x[j+1]];
+ else stops = [x[j],x[j+1]];
+ } else {
+ z0 = (A-sqrt(D))/B;
+ z1 = (A+sqrt(D))/B;
+ stops = [x[j]];
+ if(z0>x[j] && z0<x[j+1]) stops.push(z0);
+ if(z1>x[j] && z1<x[j+1]) stops.push(z1);
+ stops.push(x[j+1]);
+ }
+ t0 = stops[0];
+ z0 = this._at(t0,j);
+ for(k=0;k!==stops.length-1;k++) {
+ t1 = stops[k+1];
+ z1 = this._at(t1,j);
+ if(z0 === 0) {
+ ri.push(t0);
+ t0 = t1;
+ z0 = z1;
+ continue;
+ }
+ if(z1 === 0 || z0*z1>0) {
+ t0 = t1;
+ z0 = z1;
+ continue;
+ }
+ var side = 0;
+ while(1) {
+ tm = (z0*t1-z1*t0)/(z0-z1);
+ if(tm <= t0 || tm >= t1) { break; }
+ zm = this._at(tm,j);
+ if(zm*z1>0) {
+ t1 = tm;
+ z1 = zm;
+ if(side === -1) z0*=0.5;
+ side = -1;
+ } else if(zm*z0>0) {
+ t0 = tm;
+ z0 = zm;
+ if(side === 1) z1*=0.5;
+ side = 1;
+ } else break;
+ }
+ ri.push(tm);
+ t0 = stops[k+1];
+ z0 = this._at(t0, j);
+ }
+ if(z1 === 0) ri.push(t1);
+ }
+ ret[i] = ri;
+ }
+ if(typeof this.yl[0] === "number") return ret[0];
+ return ret;
+}
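+
+// Usage sketch (illustrative): interpolate Math.sin on [0,2*pi] and locate
+// its interior root near pi.
+//
+//   var x = numeric.linspace(0,6.283,30);
+//   var s = numeric.spline(x,numeric.sin(x));
+//   s.roots();   // includes a value close to Math.PI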
+numeric.spline = function spline(x,y,k1,kn) {
+ var n = x.length, b = [], dx = [], dy = [];
+ var i;
+ var sub = numeric.sub,mul = numeric.mul,add = numeric.add;
+ for(i=n-2;i>=0;i--) { dx[i] = x[i+1]-x[i]; dy[i] = sub(y[i+1],y[i]); }
+ if(typeof k1 === "string" || typeof kn === "string") {
+ k1 = kn = "periodic";
+ }
+ // Build sparse tridiagonal system
+ var T = [[],[],[]];
+ switch(typeof k1) {
+ case "undefined":
+ b[0] = mul(3/(dx[0]*dx[0]),dy[0]);
+ T[0].push(0,0);
+ T[1].push(0,1);
+ T[2].push(2/dx[0],1/dx[0]);
+ break;
+ case "string":
+ b[0] = add(mul(3/(dx[n-2]*dx[n-2]),dy[n-2]),mul(3/(dx[0]*dx[0]),dy[0]));
+ T[0].push(0,0,0);
+ T[1].push(n-2,0,1);
+ T[2].push(1/dx[n-2],2/dx[n-2]+2/dx[0],1/dx[0]);
+ break;
+ default:
+ b[0] = k1;
+ T[0].push(0);
+ T[1].push(0);
+ T[2].push(1);
+ break;
+ }
+ for(i=1;i<n-1;i++) {
+ b[i] = add(mul(3/(dx[i-1]*dx[i-1]),dy[i-1]),mul(3/(dx[i]*dx[i]),dy[i]));
+ T[0].push(i,i,i);
+ T[1].push(i-1,i,i+1);
+ T[2].push(1/dx[i-1],2/dx[i-1]+2/dx[i],1/dx[i]);
+ }
+ // ... (the remainder of numeric.spline, the intervening sections, and the
+ // head of numeric.gradient were lost in extraction; the gradient's
+ // step-size loop follows)
+ if(it>20) { throw new Error("Numerical gradient fails"); }
+ x0[i] = x[i]+h;
+ f1 = f(x0);
+ x0[i] = x[i]-h;
+ f2 = f(x0);
+ x0[i] = x[i];
+ if(isNaN(f1) || isNaN(f2)) { h/=16; continue; }
+ J[i] = (f1-f2)/(2*h);
+ t0 = x[i]-h;
+ t1 = x[i];
+ t2 = x[i]+h;
+ d1 = (f1-f0)/h;
+ d2 = (f0-f2)/h;
+ N = max(abs(J[i]),abs(f0),abs(f1),abs(f2),abs(t0),abs(t1),abs(t2),1e-8);
+ errest = min(max(abs(d1-J[i]),abs(d2-J[i]),abs(d1-d2))/N,h/N);
+ if(errest>eps) { h/=16; }
+ else break;
+ }
+ }
+ return J;
+}
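+
+// Usage sketch (illustrative): the gradient of f(x0,x1) = x0^2 + 3*x1 at
+// [1,2] is exactly [2,3].
+//
+//   numeric.gradient(function(x) { return x[0]*x[0]+3*x[1]; },[1,2]);
+//   // returns approximately [2,3]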
+
+numeric.uncmin = function uncmin(f,x0,tol,gradient,maxit,callback,options) {
+ var grad = numeric.gradient;
+ if(typeof options === "undefined") { options = {}; }
+ if(typeof tol === "undefined") { tol = 1e-8; }
+ if(typeof gradient === "undefined") { gradient = function(x) { return grad(f,x); }; }
+ if(typeof maxit === "undefined") maxit = 1000;
+ x0 = numeric.clone(x0);
+ var n = x0.length;
+ var f0 = f(x0),f1,df0;
+ if(isNaN(f0)) throw new Error('uncmin: f(x0) is a NaN!');
+ var max = Math.max, norm2 = numeric.norm2;
+ tol = max(tol,numeric.epsilon);
+ var step,g0,g1,H1 = options.Hinv || numeric.identity(n);
+ var dot = numeric.dot, inv = numeric.inv, sub = numeric.sub, add = numeric.add, ten = numeric.tensor, div = numeric.div, mul = numeric.mul;
+ var all = numeric.all, isfinite = numeric.isFinite, neg = numeric.neg;
+ var it=0,i,s,x1,y,Hy,Hs,ys,i0,t,nstep,t1,t2;
+ var msg = "";
+ g0 = gradient(x0);
+ while(it<maxit) {
+ if(typeof callback === "function") { if(callback(it,x0,f0,g0,H1)) { msg = "Callback returned true"; break; } }
+ if(!all(isfinite(g0))) { msg = "Gradient has Infinity or NaN"; break; }
+ step = neg(dot(H1,g0));
+ if(!all(isfinite(step))) { msg = "Search direction has Infinity or NaN"; break; }
+ nstep = norm2(step);
+ if(nstep < tol) { msg = "Newton step smaller than tol"; break; }
+ t = 1;
+ df0 = dot(g0,step);
+ // line search
+ x1 = x0;
+ while(it < maxit) {
+ if(t*nstep < tol) { break; }
+ s = mul(step,t);
+ x1 = add(x0,s);
+ f1 = f(x1);
+ if(f1-f0 >= 0.1*t*df0 || isNaN(f1)) {
+ t *= 0.5;
+ ++it;
+ continue;
+ }
+ break;
+ }
+ if(t*nstep < tol) { msg = "Line search step size smaller than tol"; break; }
+ if(it === maxit) { msg = "maxit reached during line search"; break; }
+ g1 = gradient(x1);
+ y = sub(g1,g0);
+ ys = dot(y,s);
+ Hy = dot(H1,y);
+ H1 = sub(add(H1,
+ mul(
+ (ys+dot(y,Hy))/(ys*ys),
+ ten(s,s) )),
+ div(add(ten(Hy,s),ten(s,Hy)),ys));
+ x0 = x1;
+ f0 = f1;
+ g0 = g1;
+ ++it;
+ }
+ return {solution: x0, f: f0, gradient: g0, invHessian: H1, iterations:it, message: msg};
+}
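+
+// Usage sketch (illustrative): minimize the Rosenbrock function, whose
+// minimum is at [1,1], starting from [-1,1].
+//
+//   var rosen = function(x) {
+//       return 100*Math.pow(x[1]-x[0]*x[0],2)+Math.pow(1-x[0],2);
+//   };
+//   numeric.uncmin(rosen,[-1,1]).solution;   // approximately [1,1]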
+
+// 10. Ode solver (Dormand-Prince)
+numeric.Dopri = function Dopri(x,y,f,ymid,iterations,msg,events) {
+ this.x = x;
+ this.y = y;
+ this.f = f;
+ this.ymid = ymid;
+ this.iterations = iterations;
+ this.events = events;
+ this.message = msg;
+}
+numeric.Dopri.prototype._at = function _at(xi,j) {
+ function sqr(x) { return x*x; }
+ var sol = this;
+ var xs = sol.x;
+ var ys = sol.y;
+ var k1 = sol.f;
+ var ymid = sol.ymid;
+ var n = xs.length;
+ var x0,x1,xh,y0,y1,yh,xi;
+ var floor = Math.floor,h;
+ var c = 0.5;
+ var add = numeric.add, mul = numeric.mul,sub = numeric.sub, p,q,w;
+ x0 = xs[j];
+ x1 = xs[j+1];
+ y0 = ys[j];
+ y1 = ys[j+1];
+ h = x1-x0;
+ xh = x0+c*h;
+ yh = ymid[j];
+ p = sub(k1[j ],mul(y0,1/(x0-xh)+2/(x0-x1)));
+ q = sub(k1[j+1],mul(y1,1/(x1-xh)+2/(x1-x0)));
+ w = [sqr(xi - x1) * (xi - xh) / sqr(x0 - x1) / (x0 - xh),
+ sqr(xi - x0) * sqr(xi - x1) / sqr(x0 - xh) / sqr(x1 - xh),
+ sqr(xi - x0) * (xi - xh) / sqr(x1 - x0) / (x1 - xh),
+ (xi - x0) * sqr(xi - x1) * (xi - xh) / sqr(x0-x1) / (x0 - xh),
+ (xi - x1) * sqr(xi - x0) * (xi - xh) / sqr(x0-x1) / (x1 - xh)];
+ return add(add(add(add(mul(y0,w[0]),
+ mul(yh,w[1])),
+ mul(y1,w[2])),
+ mul( p,w[3])),
+ mul( q,w[4]));
+}
+numeric.Dopri.prototype.at = function at(x) {
+ var i,j,k,floor = Math.floor;
+ if(typeof x !== "number") {
+ var n = x.length, ret = Array(n);
+ for(i=n-1;i!==-1;--i) {
+ ret[i] = this.at(x[i]);
+ }
+ return ret;
+ }
+ var x0 = this.x;
+ i = 0; j = x0.length-1;
+ while(j-i>1) {
+ k = floor(0.5*(i+j));
+ if(x0[k] <= x) i = k;
+ else j = k;
+ }
+ return this._at(x,i);
+}
+
+numeric.dopri = function dopri(x0,x1,y0,f,tol,maxit,event) {
+ if(typeof tol === "undefined") { tol = 1e-6; }
+ if(typeof maxit === "undefined") { maxit = 1000; }
+ var xs = [x0], ys = [y0], k1 = [f(x0,y0)], k2,k3,k4,k5,k6,k7, ymid = [];
+ var A2 = 1/5;
+ var A3 = [3/40,9/40];
+ var A4 = [44/45,-56/15,32/9];
+ var A5 = [19372/6561,-25360/2187,64448/6561,-212/729];
+ var A6 = [9017/3168,-355/33,46732/5247,49/176,-5103/18656];
+ var b = [35/384,0,500/1113,125/192,-2187/6784,11/84];
+ var bm = [0.5*6025192743/30085553152,
+ 0,
+ 0.5*51252292925/65400821598,
+ 0.5*-2691868925/45128329728,
+ 0.5*187940372067/1594534317056,
+ 0.5*-1776094331/19743644256,
+ 0.5*11237099/235043384];
+ var c = [1/5,3/10,4/5,8/9,1,1];
+ var e = [-71/57600,0,71/16695,-71/1920,17253/339200,-22/525,1/40];
+ var i = 0,er,j;
+ var h = (x1-x0)/10;
+ var it = 0;
+ var add = numeric.add, mul = numeric.mul, y1,erinf;
+ var max = Math.max, min = Math.min, abs = Math.abs, norminf = numeric.norminf,pow = Math.pow;
+ var any = numeric.any, lt = numeric.lt, and = numeric.and, sub = numeric.sub;
+ var e0, e1, ev;
+ var ret = new numeric.Dopri(xs,ys,k1,ymid,-1,"");
+ if(typeof event === "function") e0 = event(x0,y0);
+ while(x0<x1 && it<maxit) {
+ ++it;
+ if(x0+h>x1) h = x1-x0;
+ k2 = f(x0+c[0]*h, add(y0,mul( A2*h,k1[i])));
+ k3 = f(x0+c[1]*h, add(add(y0,mul(A3[0]*h,k1[i])),mul(A3[1]*h,k2)));
+ k4 = f(x0+c[2]*h, add(add(add(y0,mul(A4[0]*h,k1[i])),mul(A4[1]*h,k2)),mul(A4[2]*h,k3)));
+ k5 = f(x0+c[3]*h, add(add(add(add(y0,mul(A5[0]*h,k1[i])),mul(A5[1]*h,k2)),mul(A5[2]*h,k3)),mul(A5[3]*h,k4)));
+ k6 = f(x0+c[4]*h,add(add(add(add(add(y0,mul(A6[0]*h,k1[i])),mul(A6[1]*h,k2)),mul(A6[2]*h,k3)),mul(A6[3]*h,k4)),mul(A6[4]*h,k5)));
+ y1 = add(add(add(add(add(y0,mul(k1[i],h*b[0])),mul(k3,h*b[2])),mul(k4,h*b[3])),mul(k5,h*b[4])),mul(k6,h*b[5]));
+ k7 = f(x0+h,y1);
+ er = add(add(add(add(add(mul(k1[i],h*e[0]),mul(k3,h*e[2])),mul(k4,h*e[3])),mul(k5,h*e[4])),mul(k6,h*e[5])),mul(k7,h*e[6]));
+ if(typeof er === "number") erinf = abs(er);
+ else erinf = norminf(er);
+ if(erinf > tol) { // reject
+ h = 0.2*h*pow(tol/erinf,0.25);
+ if(x0+h === x0) {
+ ret.message = "Step size became too small";
+ break;
+ }
+ continue;
+ }
+ ymid[i] = add(add(add(add(add(add(y0,
+ mul(k1[i],h*bm[0])),
+ mul(k3 ,h*bm[2])),
+ mul(k4 ,h*bm[3])),
+ mul(k5 ,h*bm[4])),
+ mul(k6 ,h*bm[5])),
+ mul(k7 ,h*bm[6]));
+ ++i;
+ xs[i] = x0+h;
+ ys[i] = y1;
+ k1[i] = k7;
+ if(typeof event === "function") {
+ var yi,xl = x0,xr = x0+0.5*h,xi;
+ e1 = event(xr,ymid[i-1]);
+ ev = and(lt(e0,0),lt(0,e1));
+ if(!any(ev)) { xl = xr; xr = x0+h; e0 = e1; e1 = event(xr,y1); ev = and(lt(e0,0),lt(0,e1)); }
+ if(any(ev)) {
+ var xc, yc, en,ei;
+ var side=0, sl = 1.0, sr = 1.0;
+ while(1) {
+ if(typeof e0 === "number") xi = (sr*e1*xl-sl*e0*xr)/(sr*e1-sl*e0);
+ else {
+ xi = xr;
+ for(j=e0.length-1;j!==-1;--j) {
+ if(e0[j]<0 && e1[j]>0) xi = min(xi,(sr*e1[j]*xl-sl*e0[j]*xr)/(sr*e1[j]-sl*e0[j]));
+ }
+ }
+ if(xi <= xl || xi >= xr) break;
+ yi = ret._at(xi, i-1);
+ ei = event(xi,yi);
+ en = and(lt(e0,0),lt(0,ei));
+ if(any(en)) {
+ xr = xi;
+ e1 = ei;
+ ev = en;
+ sr = 1.0;
+ if(side === -1) sl *= 0.5;
+ else sl = 1.0;
+ side = -1;
+ } else {
+ xl = xi;
+ e0 = ei;
+ sl = 1.0;
+ if(side === 1) sr *= 0.5;
+ else sr = 1.0;
+ side = 1;
+ }
+ }
+ y1 = ret._at(0.5*(x0+xi),i-1);
+ ret.f[i] = f(xi,yi);
+ ret.x[i] = xi;
+ ret.y[i] = yi;
+ ret.ymid[i-1] = y1;
+ ret.events = ev;
+ ret.iterations = it;
+ return ret;
+ }
+ }
+ x0 += h;
+ y0 = y1;
+ e0 = e1;
+ h = min(0.8*h*pow(tol/erinf,0.25),4*h);
+ }
+ ret.iterations = it;
+ return ret;
+}
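+
+// Usage sketch (illustrative): integrate y' = y on [0,1] with y(0) = 1;
+// the exact solution is exp(x).
+//
+//   var sol = numeric.dopri(0,1,1,function(x,y) { return y; });
+//   sol.at(1);   // approximately Math.E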
+
+// 11. Ax = b
+numeric.LU = function(A, fast) {
+ fast = fast || false;
+
+ var abs = Math.abs;
+ var i, j, k, absAjk, Akk, Ak, Pk, Ai;
+ var max;
+ var n = A.length, n1 = n-1;
+ var P = new Array(n);
+ if(!fast) A = numeric.clone(A);
+
+ for (k = 0; k < n; ++k) {
+ Pk = k;
+ Ak = A[k];
+ max = abs(Ak[k]);
+ for (j = k + 1; j < n; ++j) {
+ absAjk = abs(A[j][k]);
+ if (max < absAjk) {
+ max = absAjk;
+ Pk = j;
+ }
+ }
+ P[k] = Pk;
+
+ if (Pk != k) {
+ A[k] = A[Pk];
+ A[Pk] = Ak;
+ Ak = A[k];
+ }
+
+ Akk = Ak[k];
+
+ for (i = k + 1; i < n; ++i) {
+ A[i][k] /= Akk;
+ }
+
+ for (i = k + 1; i < n; ++i) {
+ Ai = A[i];
+ for (j = k + 1; j < n1; ++j) {
+ Ai[j] -= Ai[k] * Ak[j];
+ ++j;
+ Ai[j] -= Ai[k] * Ak[j];
+ }
+ if(j===n1) Ai[j] -= Ai[k] * Ak[j];
+ }
+ }
+
+ return {
+ LU: A,
+ P: P
+ };
+}
+
+numeric.LUsolve = function LUsolve(LUP, b) {
+ var i, j;
+ var LU = LUP.LU;
+ var n = LU.length;
+ var x = numeric.clone(b);
+ var P = LUP.P;
+ var Pi, LUi, LUii, tmp;
+
+ for (i=n-1;i!==-1;--i) x[i] = b[i];
+ for (i = 0; i < n; ++i) {
+ Pi = P[i];
+ if (P[i] !== i) {
+ tmp = x[i];
+ x[i] = x[Pi];
+ x[Pi] = tmp;
+ }
+
+ LUi = LU[i];
+ for (j = 0; j < i; ++j) {
+ x[i] -= x[j] * LUi[j];
+ }
+ }
+
+ for (i = n - 1; i >= 0; --i) {
+ LUi = LU[i];
+ for (j = i + 1; j < n; ++j) {
+ x[i] -= x[j] * LUi[j];
+ }
+
+ x[i] /= LUi[i];
+ }
+
+ return x;
+}
+
+numeric.solve = function solve(A,b,fast) { return numeric.LUsolve(numeric.LU(A,fast), b); }
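+
+// Usage sketch (illustrative): solve [[1,2],[3,4]] x = [17,39]; the exact
+// solution is [5,6].
+//
+//   numeric.solve([[1,2],[3,4]],[17,39]);   // approximately [5,6]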
+
+// 12. Linear programming
+numeric.echelonize = function echelonize(A) {
+ var s = numeric.dim(A), m = s[0], n = s[1];
+ var I = numeric.identity(m);
+ var P = Array(m);
+ var i,j,k,l,Ai,Ii,Z,a;
+ var abs = Math.abs;
+ var diveq = numeric.diveq;
+ A = numeric.clone(A);
+ for(i=0;i<m;++i) {
+ k = 0;
+ Ai = A[i];
+ Ii = I[i];
+ for(j=1;j<n;++j) if(abs(Ai[k])<abs(Ai[j])) k=j;
+ P[i] = k;
+ diveq(Ii,Ai[k]);
+ diveq(Ai,Ai[k]);
+ for(j=0;j<m;++j) if(j!==i) {
+ Z = A[j]; a = Z[k];
+ for(l=n-1;l!==-1;--l) Z[l] -= Ai[l]*a;
+ Z = I[j];
+ for(l=m-1;l!==-1;--l) Z[l] -= Ii[l]*a;
+ }
+ }
+ return {I:I, A:A, P:P};
+}
+
+// ... (the declarations and opening iterations of numeric.__solveLP were
+// lost in extraction; the interior-point step follows)
+ if(!isFinite(alpha) || alpha>a1) alpha = a1;
+ g = add(c,mul(alpha,p));
+ H = dot(A1,A0);
+ for(i=m-1;i!==-1;--i) H[i][i] += 1;
+ d = solve(H,div(g,alpha),true);
+ var t0 = div(z,dot(A,d));
+ var t = 1.0;
+ for(i=n-1;i!==-1;--i) if(t0[i]<0) t = min(t,-0.999*t0[i]);
+ y = sub(x,mul(d,t));
+ z = sub(b,dot(A,y));
+ if(!all(gt(z,0))) return { solution: x, message: "", iterations: count };
+ x = y;
+ if(alpha<tol) return { solution: y, message: "", iterations: count };
+ if(flag) {
+ // ... (the flag===true unboundedness test was lost in extraction)
+ } else {
+ if(x[m-1]>=0) unbounded = false;
+ else unbounded = true;
+ }
+ if(unbounded) return { solution: y, message: "Unbounded", iterations: count };
+ }
+ return { solution: x, message: "maximum iteration count exceeded", iterations:count };
+}
+
+numeric._solveLP = function _solveLP(c,A,b,tol,maxit) {
+ var m = c.length, n = b.length,y;
+ var sum = numeric.sum, log = numeric.log, mul = numeric.mul, sub = numeric.sub, dot = numeric.dot, div = numeric.div, add = numeric.add;
+ var c0 = numeric.rep([m],0).concat([1]);
+ var J = numeric.rep([n,1],-1);
+ var A0 = numeric.blockMatrix([[A , J ]]);
+ var b0 = b;
+ var y = numeric.rep([m],0).concat(Math.max(0,numeric.sup(numeric.neg(b)))+1);
+ var x0 = numeric.__solveLP(c0,A0,b0,tol,maxit,y,false);
+ var x = numeric.clone(x0.solution);
+ x.length = m;
+ var foo = numeric.inf(sub(b,dot(A,x)));
+ if(foo<0) { return { solution: NaN, message: "Infeasible", iterations: x0.iterations }; }
+ var ret = numeric.__solveLP(c, A, b, tol, maxit-x0.iterations, x, true);
+ ret.iterations += x0.iterations;
+ return ret;
+};
+
+numeric.solveLP = function solveLP(c,A,b,Aeq,beq,tol,maxit) {
+ if(typeof maxit === "undefined") maxit = 1000;
+ if(typeof tol === "undefined") tol = numeric.epsilon;
+ if(typeof Aeq === "undefined") return numeric._solveLP(c,A,b,tol,maxit);
+ var m = Aeq.length, n = Aeq[0].length, o = A.length;
+ var B = numeric.echelonize(Aeq);
+ var flags = numeric.rep([n],0);
+ var P = B.P;
+ var Q = [];
+ var i;
+ for(i=P.length-1;i!==-1;--i) flags[P[i]] = 1;
+ for(i=n-1;i!==-1;--i) if(flags[i]===0) Q.push(i);
+ var g = numeric.getRange;
+ var I = numeric.linspace(0,m-1), J = numeric.linspace(0,o-1);
+ var Aeq2 = g(Aeq,I,Q), A1 = g(A,J,P), A2 = g(A,J,Q), dot = numeric.dot, sub = numeric.sub;
+ var A3 = dot(A1,B.I);
+ var A4 = sub(A2,dot(A3,Aeq2)), b4 = sub(b,dot(A3,beq));
+ var c1 = Array(P.length), c2 = Array(Q.length);
+ for(i=P.length-1;i!==-1;--i) c1[i] = c[P[i]];
+ for(i=Q.length-1;i!==-1;--i) c2[i] = c[Q[i]];
+ var c4 = sub(c2,dot(c1,dot(B.I,Aeq2)));
+ var S = numeric._solveLP(c4,A4,b4,tol,maxit);
+ var x2 = S.solution;
+ if(x2!==x2) return S;
+ var x1 = dot(B.I,sub(beq,dot(Aeq2,x2)));
+ var x = Array(c.length);
+ for(i=P.length-1;i!==-1;--i) x[P[i]] = x1[i];
+ for(i=Q.length-1;i!==-1;--i) x[Q[i]] = x2[i];
+ return { solution: x, message:S.message, iterations: S.iterations };
+}
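+
+// Usage sketch (illustrative): minimize x0+x1 subject to x0 >= 0, x1 >= 0
+// and x0+2*x1 >= 3, written as A x <= b with the inequalities negated.
+//
+//   numeric.solveLP([1,1],[[-1,0],[0,-1],[-1,-2]],[0,0,-3]).solution;
+//   // approximately [0,1.5]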
+
+numeric.MPStoLP = function MPStoLP(MPS) {
+ if(typeof MPS === "string") { MPS = MPS.split('\n'); }
+ var state = 0;
+ var states = ['Initial state','NAME','ROWS','COLUMNS','RHS','BOUNDS','ENDATA'];
+ var n = MPS.length;
+ var i,j,z,N=0,rows = {}, sign = [], rl = 0, vars = {}, nv = 0;
+ var name;
+ var c = [], A = [], b = [];
+ function err(e) { throw new Error('MPStoLP: '+e+'\nLine '+i+': '+MPS[i]+'\nCurrent state: '+states[state]+'\n'); }
+ for(i=0;i<n;++i) {
+ // ... (the MPS parser body, the return value of MPStoLP, and the opening
+ // of the seedrandom.js banner comment were lost in extraction)
+ }
+}
+
+// seedrandom.js
+// Author: David Bau
+//
+// Defines a method Math.seedrandom() that, when called, substitutes
+// an explicitly seeded RC4-based algorithm for Math.random().  Also
+// supports automatic seeding from local or network sources of entropy.
+//
+// Usage:
+//
+// Math.seedrandom('yipee'); Sets Math.random to a function that is
+// initialized using the given explicit seed.
+//
+// Math.seedrandom(); Sets Math.random to a function that is
+// seeded using the current time, dom state,
+// and other accumulated local entropy.
+// The generated seed string is returned.
+//
+// Math.seedrandom('yowza', true);
+// Seeds using the given explicit seed mixed
+// together with accumulated entropy.
+//
+//
+// Seeds using physical random bits downloaded
+// from random.org.
+//
+// Seeds using urandom bits from call.jsonlib.com,
+// which is faster than random.org.
+//
+// Examples:
+//
+// Math.seedrandom("hello"); // Use "hello" as the seed.
+// document.write(Math.random()); // Always 0.5463663768140734
+// document.write(Math.random()); // Always 0.43973793770592234
+// var rng1 = Math.random; // Remember the current prng.
+//
+// var autoseed = Math.seedrandom(); // New prng with an automatic seed.
+// document.write(Math.random()); // Pretty much unpredictable.
+//
+// Math.random = rng1; // Continue "hello" prng sequence.
+// document.write(Math.random()); // Always 0.554769432473455
+//
+// Math.seedrandom(autoseed); // Restart at the previous seed.
+// document.write(Math.random()); // Repeat the 'unpredictable' value.
+//
+// Notes:
+//
+// Each time seedrandom('arg') is called, entropy from the passed seed
+// is accumulated in a pool to help generate future seeds for the
+// zero-argument form of Math.seedrandom, so entropy can be injected over
+// time by calling seedrandom with explicit data repeatedly.
+//
+// On speed - This javascript implementation of Math.random() is about
+// 3-10x slower than the built-in Math.random() because it is not native
+// code, but this is typically fast enough anyway. Seeding is more expensive,
+// especially if you use auto-seeding. Some details (timings on Chrome 4):
+//
+// Our Math.random() - avg less than 0.002 milliseconds per call
+// seedrandom('explicit') - avg less than 0.5 milliseconds per call
+// seedrandom('explicit', true) - avg less than 2 milliseconds per call
+// seedrandom() - avg about 38 milliseconds per call
+//
+// LICENSE (BSD):
+//
+// Copyright 2010 David Bau, all rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. Neither the name of this module nor the names of its contributors may
+// be used to endorse or promote products derived from this software
+// without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+/**
+ * All code is in an anonymous closure to keep the global namespace clean.
+ *
+ * @param {number=} overflow
+ * @param {number=} startdenom
+ */
+
+// Patched by Seb so that seedrandom.js does not pollute the Math object.
+// My tests suggest that doing Math.trouble = 1 makes Math lookups about 5%
+// slower.
+numeric.seedrandom = { pow:Math.pow, random:Math.random };
+
+(function (pool, math, width, chunks, significance, overflow, startdenom) {
+
+
+//
+// seedrandom()
+// This is the seedrandom function described above.
+//
+math['seedrandom'] = function seedrandom(seed, use_entropy) {
+ var key = [];
+ var arc4;
+
+ // Flatten the seed string or build one from local entropy if needed.
+ seed = mixkey(flatten(
+ use_entropy ? [seed, pool] :
+ arguments.length ? seed :
+ [new Date().getTime(), pool, window], 3), key);
+
+ // Use the seed to initialize an ARC4 generator.
+ arc4 = new ARC4(key);
+
+ // Mix the randomness into accumulated entropy.
+ mixkey(arc4.S, pool);
+
+ // Override Math.random
+
+ // This function returns a random double in [0, 1) that contains
+ // randomness in every bit of the mantissa of the IEEE 754 value.
+
+ math['random'] = function random() { // Closure to return a random double:
+ var n = arc4.g(chunks); // Start with a numerator n < 2 ^ 48
+ var d = startdenom; // and denominator d = 2 ^ 48.
+ var x = 0; // and no 'extra last byte'.
+ while (n < significance) { // Fill up all significant digits by
+ n = (n + x) * width; // shifting numerator and
+ d *= width; // denominator and generating a
+ x = arc4.g(1); // new least-significant-byte.
+ }
+ while (n >= overflow) { // To avoid rounding up, before adding
+ n /= 2; // last byte, shift everything
+ d /= 2; // right using integer math until
+ x >>>= 1; // we have exactly the desired bits.
+ }
+ return (n + x) / d; // Form the number within [0, 1).
+ };
+
+ // Return the seed that was used
+ return seed;
+};
+
+//
+// ARC4
+//
+// An ARC4 implementation. The constructor takes a key in the form of
+// an array of at most (width) integers that should be 0 <= x < (width).
+//
+// The g(count) method returns a pseudorandom integer that concatenates
+// the next (count) outputs from ARC4. Its return value is a number x
+// that is in the range 0 <= x < (width ^ count).
+//
+/** @constructor */
+function ARC4(key) {
+ var t, u, me = this, keylen = key.length;
+ var i = 0, j = me.i = me.j = me.m = 0;
+ me.S = [];
+ me.c = [];
+
+ // The empty key [] is treated as [0].
+ if (!keylen) { key = [keylen++]; }
+
+ // Set up S using the standard key scheduling algorithm.
+ while (i < width) { me.S[i] = i++; }
+ for (i = 0; i < width; i++) {
+ t = me.S[i];
+ j = lowbits(j + t + key[i % keylen]);
+ u = me.S[j];
+ me.S[i] = u;
+ me.S[j] = t;
+ }
+
+ // The "g" method returns the next (count) outputs as one number.
+ me.g = function getnext(count) {
+ var s = me.S;
+ var i = lowbits(me.i + 1); var t = s[i];
+ var j = lowbits(me.j + t); var u = s[j];
+ s[i] = u;
+ s[j] = t;
+ var r = s[lowbits(t + u)];
+ while (--count) {
+ i = lowbits(i + 1); t = s[i];
+ j = lowbits(j + t); u = s[j];
+ s[i] = u;
+ s[j] = t;
+ r = r * width + s[lowbits(t + u)];
+ }
+ me.i = i;
+ me.j = j;
+ return r;
+ };
+ // For robust unpredictability discard an initial batch of values.
+ // See http://www.rsa.com/rsalabs/node.asp?id=2009
+ me.g(width);
+}
+
+//
+// flatten()
+// Converts an object tree to nested arrays of strings.
+//
+/** @param {Object=} result
+ * @param {string=} prop
+ * @param {string=} typ */
+function flatten(obj, depth, result, prop, typ) {
+ result = [];
+ typ = typeof(obj);
+ if (depth && typ == 'object') {
+ for (prop in obj) {
+ if (prop.indexOf('S') < 5) { // Avoid FF3 bug (local/sessionStorage)
+ try { result.push(flatten(obj[prop], depth - 1)); } catch (e) {}
+ }
+ }
+ }
+ return (result.length ? result : obj + (typ != 'string' ? '\0' : ''));
+}
+
+//
+// mixkey()
+// Mixes a string seed into a key that is an array of integers, and
+// returns a shortened string seed that is equivalent to the result key.
+//
+/** @param {number=} smear
+ * @param {number=} j */
+function mixkey(seed, key, smear, j) {
+ seed += ''; // Ensure the seed is a string
+ smear = 0;
+ for (j = 0; j < seed.length; j++) {
+ key[lowbits(j)] =
+ lowbits((smear ^= key[lowbits(j)] * 19) + seed.charCodeAt(j));
+ }
+ seed = '';
+ for (j in key) { seed += String.fromCharCode(key[j]); }
+ return seed;
+}
+
+//
+// lowbits()
+// A quick "n mod width" for width a power of 2.
+//
+function lowbits(n) { return n & (width - 1); }
+
+//
+// The following constants are related to IEEE 754 limits.
+//
+startdenom = math.pow(width, chunks);
+significance = math.pow(2, significance);
+overflow = significance * 2;
+
+//
+// When seedrandom.js is loaded, we immediately mix a few bits
+// from the built-in RNG into the entropy pool. Because we do
+// not want to interfere with deterministic PRNG state later,
+// seedrandom will not call math.random on its own again after
+// initialization.
+//
+mixkey(math.random(), pool);
+
+// End anonymous scope, and pass initial values.
+}(
+ [], // pool: entropy pool starts empty
+ numeric.seedrandom, // math: package containing random, pow, and seedrandom
+ 256, // width: each RC4 output is 0 <= x < 256
+ 6, // chunks: at least six RC4 outputs for each double
+ 52 // significance: there are 52 significant digits in a double
+ ));
+/* This file is a slightly modified version of quadprog.js from Alberto Santini.
+ * It has been slightly modified by Sébastien Loisel to make sure that it handles
+ * 0-based Arrays instead of 1-based Arrays.
+ * License is in resources/LICENSE.quadprog */
+(function(exports) {
+
+function base0to1(A) {
+ if(typeof A !== "object") { return A; }
+ var ret = [], i,n=A.length;
+ for(i=0;i<n;i++) ret[i+1] = base0to1(A[i]);
+ return ret;
+}
+
+// ... (base1to0, the setup of qpgen2, and the opening of fn_goto_50 were
+// lost in extraction; the constraint-violation scan follows)
+ if (i > meq) {
+ work[l] = sum;
+ } else {
+ work[l] = -Math.abs(sum);
+ if (sum > 0) {
+ for (j = 1; j <= n; j = j + 1) {
+ amat[j][i] = -amat[j][i];
+ }
+ bvec[i] = -bvec[i];
+ }
+ }
+ }
+
+ for (i = 1; i <= nact; i = i + 1) {
+ work[iwsv + iact[i]] = 0;
+ }
+
+ nvl = 0;
+ temp = 0;
+ for (i = 1; i <= q; i = i + 1) {
+ if (work[iwsv + i] < temp * work[iwnbv + i]) {
+ nvl = i;
+ temp = work[iwsv + i] / work[iwnbv + i];
+ }
+ }
+ if (nvl === 0) {
+ return 999;
+ }
+
+ return 0;
+ }
+
+ function fn_goto_55() {
+ for (i = 1; i <= n; i = i + 1) {
+ sum = 0;
+ for (j = 1; j <= n; j = j + 1) {
+ sum = sum + dmat[j][i] * amat[j][nvl];
+ }
+ work[i] = sum;
+ }
+
+ l1 = iwzv;
+ for (i = 1; i <= n; i = i + 1) {
+ work[l1 + i] = 0;
+ }
+ for (j = nact + 1; j <= n; j = j + 1) {
+ for (i = 1; i <= n; i = i + 1) {
+ work[l1 + i] = work[l1 + i] + dmat[i][j] * work[j];
+ }
+ }
+
+ t1inf = true;
+ for (i = nact; i >= 1; i = i - 1) {
+ sum = work[i];
+ l = iwrm + (i * (i + 3)) / 2;
+ l1 = l - i;
+ for (j = i + 1; j <= nact; j = j + 1) {
+ sum = sum - work[l] * work[iwrv + j];
+ l = l + j;
+ }
+ sum = sum / work[l1];
+ work[iwrv + i] = sum;
+ if (iact[i] < meq) {
+ // continue;
+ break;
+ }
+ if (sum < 0) {
+ // continue;
+ break;
+ }
+ t1inf = false;
+ it1 = i;
+ }
+
+ if (!t1inf) {
+ t1 = work[iwuv + it1] / work[iwrv + it1];
+ for (i = 1; i <= nact; i = i + 1) {
+ if (iact[i] < meq) {
+ // continue;
+ break;
+ }
+ if (work[iwrv + i] < 0) {
+ // continue;
+ break;
+ }
+ temp = work[iwuv + i] / work[iwrv + i];
+ if (temp < t1) {
+ t1 = temp;
+ it1 = i;
+ }
+ }
+ }
+
+ sum = 0;
+ for (i = iwzv + 1; i <= iwzv + n; i = i + 1) {
+ sum = sum + work[i] * work[i];
+ }
+ if (Math.abs(sum) <= vsmall) {
+ if (t1inf) {
+ ierr[1] = 1;
+ // GOTO 999
+ return 999;
+ } else {
+ for (i = 1; i <= nact; i = i + 1) {
+ work[iwuv + i] = work[iwuv + i] - t1 * work[iwrv + i];
+ }
+ work[iwuv + nact + 1] = work[iwuv + nact + 1] + t1;
+ // GOTO 700
+ return 700;
+ }
+ } else {
+ sum = 0;
+ for (i = 1; i <= n; i = i + 1) {
+ sum = sum + work[iwzv + i] * amat[i][nvl];
+ }
+ tt = -work[iwsv + nvl] / sum;
+ t2min = true;
+ if (!t1inf) {
+ if (t1 < tt) {
+ tt = t1;
+ t2min = false;
+ }
+ }
+
+ for (i = 1; i <= n; i = i + 1) {
+ sol[i] = sol[i] + tt * work[iwzv + i];
+ if (Math.abs(sol[i]) < vsmall) {
+ sol[i] = 0;
+ }
+ }
+
+ crval[1] = crval[1] + tt * sum * (tt / 2 + work[iwuv + nact + 1]);
+ for (i = 1; i <= nact; i = i + 1) {
+ work[iwuv + i] = work[iwuv + i] - tt * work[iwrv + i];
+ }
+ work[iwuv + nact + 1] = work[iwuv + nact + 1] + tt;
+
+ if (t2min) {
+ nact = nact + 1;
+ iact[nact] = nvl;
+
+ l = iwrm + ((nact - 1) * nact) / 2 + 1;
+ for (i = 1; i <= nact - 1; i = i + 1) {
+ work[l] = work[i];
+ l = l + 1;
+ }
+
+ if (nact === n) {
+ work[l] = work[n];
+ } else {
+ for (i = n; i >= nact + 1; i = i - 1) {
+ if (work[i] === 0) {
+ // continue;
+ break;
+ }
+ gc = Math.max(Math.abs(work[i - 1]), Math.abs(work[i]));
+ gs = Math.min(Math.abs(work[i - 1]), Math.abs(work[i]));
+ if (work[i - 1] >= 0) {
+ temp = Math.abs(gc * Math.sqrt(1 + gs * gs / (gc * gc)));
+ } else {
+ temp = -Math.abs(gc * Math.sqrt(1 + gs * gs / (gc * gc)));
+ }
+ gc = work[i - 1] / temp;
+ gs = work[i] / temp;
+
+ if (gc === 1) {
+ // continue;
+ break;
+ }
+ if (gc === 0) {
+ work[i - 1] = gs * temp;
+ for (j = 1; j <= n; j = j + 1) {
+ temp = dmat[j][i - 1];
+ dmat[j][i - 1] = dmat[j][i];
+ dmat[j][i] = temp;
+ }
+ } else {
+ work[i - 1] = temp;
+ nu = gs / (1 + gc);
+ for (j = 1; j <= n; j = j + 1) {
+ temp = gc * dmat[j][i - 1] + gs * dmat[j][i];
+ dmat[j][i] = nu * (dmat[j][i - 1] + temp) - dmat[j][i];
+ dmat[j][i - 1] = temp;
+
+ }
+ }
+ }
+ work[l] = work[nact];
+ }
+ } else {
+ sum = -bvec[nvl];
+ for (j = 1; j <= n; j = j + 1) {
+ sum = sum + sol[j] * amat[j][nvl];
+ }
+ if (nvl > meq) {
+ work[iwsv + nvl] = sum;
+ } else {
+ work[iwsv + nvl] = -Math.abs(sum);
+ if (sum > 0) {
+ for (j = 1; j <= n; j = j + 1) {
+ amat[j][nvl] = -amat[j][nvl];
+ }
+ bvec[nvl] = -bvec[nvl];
+ }
+ }
+ // GOTO 700
+ return 700;
+ }
+ }
+
+ return 0;
+ }
+
+ function fn_goto_797() {
+ l = iwrm + (it1 * (it1 + 1)) / 2 + 1;
+ l1 = l + it1;
+ if (work[l1] === 0) {
+ // GOTO 798
+ return 798;
+ }
+ gc = Math.max(Math.abs(work[l1 - 1]), Math.abs(work[l1]));
+ gs = Math.min(Math.abs(work[l1 - 1]), Math.abs(work[l1]));
+ if (work[l1 - 1] >= 0) {
+ temp = Math.abs(gc * Math.sqrt(1 + gs * gs / (gc * gc)));
+ } else {
+ temp = -Math.abs(gc * Math.sqrt(1 + gs * gs / (gc * gc)));
+ }
+ gc = work[l1 - 1] / temp;
+ gs = work[l1] / temp;
+
+ if (gc === 1) {
+ // GOTO 798
+ return 798;
+ }
+ if (gc === 0) {
+ for (i = it1 + 1; i <= nact; i = i + 1) {
+ temp = work[l1 - 1];
+ work[l1 - 1] = work[l1];
+ work[l1] = temp;
+ l1 = l1 + i;
+ }
+ for (i = 1; i <= n; i = i + 1) {
+ temp = dmat[i][it1];
+ dmat[i][it1] = dmat[i][it1 + 1];
+ dmat[i][it1 + 1] = temp;
+ }
+ } else {
+ nu = gs / (1 + gc);
+ for (i = it1 + 1; i <= nact; i = i + 1) {
+ temp = gc * work[l1 - 1] + gs * work[l1];
+ work[l1] = nu * (work[l1 - 1] + temp) - work[l1];
+ work[l1 - 1] = temp;
+ l1 = l1 + i;
+ }
+ for (i = 1; i <= n; i = i + 1) {
+ temp = gc * dmat[i][it1] + gs * dmat[i][it1 + 1];
+ dmat[i][it1 + 1] = nu * (dmat[i][it1] + temp) - dmat[i][it1 + 1];
+ dmat[i][it1] = temp;
+ }
+ }
+
+ return 0;
+ }
+
+ function fn_goto_798() {
+ l1 = l - it1;
+ for (i = 1; i <= it1; i = i + 1) {
+ work[l1] = work[l];
+ l = l + 1;
+ l1 = l1 + 1;
+ }
+
+ work[iwuv + it1] = work[iwuv + it1 + 1];
+ iact[it1] = iact[it1 + 1];
+ it1 = it1 + 1;
+ if (it1 < nact) {
+ // GOTO 797
+ return 797;
+ }
+
+ return 0;
+ }
+
+ function fn_goto_799() {
+ work[iwuv + nact] = work[iwuv + nact + 1];
+ work[iwuv + nact + 1] = 0;
+ iact[nact] = 0;
+ nact = nact - 1;
+ iter[2] = iter[2] + 1;
+
+ return 0;
+ }
+
+ go = 0;
+ while (true) {
+ go = fn_goto_50();
+ if (go === 999) {
+ return;
+ }
+ while (true) {
+ go = fn_goto_55();
+ if (go === 0) {
+ break;
+ }
+ if (go === 999) {
+ return;
+ }
+ if (go === 700) {
+ if (it1 === nact) {
+ fn_goto_799();
+ } else {
+ while (true) {
+ fn_goto_797();
+ go = fn_goto_798();
+ if (go !== 797) {
+ break;
+ }
+ }
+ fn_goto_799();
+ }
+ }
+ }
+ }
+
+}
+
+function solveQP(Dmat, dvec, Amat, bvec, meq, factorized) {
+ Dmat = base0to1(Dmat);
+ dvec = base0to1(dvec);
+ Amat = base0to1(Amat);
+ var i, n, q,
+ nact, r,
+ crval = [], iact = [], sol = [], work = [], iter = [],
+ message;
+
+ meq = meq || 0;
+ factorized = factorized ? base0to1(factorized) : [undefined, 0];
+ bvec = bvec ? base0to1(bvec) : [];
+
+ // In Fortran the array index starts from 1
+ n = Dmat.length - 1;
+ q = Amat[1].length - 1;
+
+ if (!bvec) {
+ for (i = 1; i <= q; i = i + 1) {
+ bvec[i] = 0;
+ }
+ }
+ for (i = 1; i <= q; i = i + 1) {
+ iact[i] = 0;
+ }
+ nact = 0;
+ r = Math.min(n, q);
+ for (i = 1; i <= n; i = i + 1) {
+ sol[i] = 0;
+ }
+ crval[1] = 0;
+ for (i = 1; i <= (2 * n + (r * (r + 5)) / 2 + 2 * q + 1); i = i + 1) {
+ work[i] = 0;
+ }
+ for (i = 1; i <= 2; i = i + 1) {
+ iter[i] = 0;
+ }
+
+ qpgen2(Dmat, dvec, n, n, sol, crval, Amat,
+ bvec, n, q, meq, iact, nact, iter, work, factorized);
+
+ message = "";
+ if (factorized[1] === 1) {
+ message = "constraints are inconsistent, no solution!";
+ }
+ if (factorized[1] === 2) {
+ message = "matrix D in quadratic function is not positive definite!";
+ }
+
+ return {
+ solution: base1to0(sol),
+ value: base1to0(crval),
+ unconstrained_solution: base1to0(dvec),
+ iterations: base1to0(iter),
+ iact: base1to0(iact),
+ message: message
+ };
+}
+exports.solveQP = solveQP;
+}(numeric));
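+// Usage sketch (illustrative; the classic example from the R quadprog
+// documentation): minimize (1/2)x'Dx - d'x subject to A'x >= b, with D the
+// 3x3 identity, d = [0,5,0], and three constraint columns in Amat.
+//
+//   numeric.solveQP([[1,0,0],[0,1,0],[0,0,1]],
+//                   [0,5,0],
+//                   [[-4,2,0],[-3,1,-2],[0,0,1]],
+//                   [-8,2,0]).solution;
+//   // approximately [0.476,1.048,2.095]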
+/*
+Shanti Rao sent me this routine by private email. I had to modify it
+slightly to work on Arrays instead of using a Matrix object.
+It is apparently translated from http://stitchpanorama.sourceforge.net/Python/svd.py
+*/
+
+numeric.svd= function svd(A) {
+ var temp;
+//Compute the thin SVD from G. H. Golub and C. Reinsch, Numer. Math. 14, 403-420 (1970)
+ var prec= numeric.epsilon; //Math.pow(2,-52) // assumes double prec
+ var tolerance= 1.e-64/prec;
+ var itmax= 50;
+ var c=0;
+ var i=0;
+ var j=0;
+ var k=0;
+ var l=0;
+
+ var u= numeric.clone(A);
+ var m= u.length;
+
+ var n= u[0].length;
+
+ if (m < n) throw "Need more rows than columns"
+
+ var e = new Array(n);
+ var q = new Array(n);
+ for (i=0; i<n; i++) e[i] = q[i] = 0.0;
+ var v = numeric.rep([n,n],0);
+
+ function pythag(a,b)
+ {
+ a = Math.abs(a)
+ b = Math.abs(b)
+ if (a > b)
+ return a*Math.sqrt(1.0+(b*b/a/a))
+ else if (b == 0.0)
+ return a
+ return b*Math.sqrt(1.0+(a*a/b/b))
+ }
+
+ //Householder's reduction to bidiagonal form
+
+ var f= 0.0;
+ var g= 0.0;
+ var h= 0.0;
+ var x= 0.0;
+ var y= 0.0;
+ var z= 0.0;
+ var s= 0.0;
+
+ for (i=0; i < n; i++)
+ {
+ e[i]= g;
+ s= 0.0;
+ l= i+1;
+ for (j=i; j < m; j++)
+ s += (u[j][i]*u[j][i]);
+ if (s <= tolerance)
+ g= 0.0;
+ else
+ {
+ f= u[i][i];
+ g= Math.sqrt(s);
+ if (f >= 0.0) g= -g;
+ h= f*g-s
+ u[i][i]=f-g;
+ for (j=l; j < n; j++)
+ {
+ s= 0.0
+ for (k=i; k < m; k++)
+ s += u[k][i]*u[k][j]
+ f= s/h
+ for (k=i; k < m; k++)
+ u[k][j]+=f*u[k][i]
+ }
+ }
+ q[i]= g
+ s= 0.0
+ for (j=l; j < n; j++)
+ s= s + u[i][j]*u[i][j]
+ if (s <= tolerance)
+ g= 0.0
+ else
+ {
+ f= u[i][i+1]
+ g= Math.sqrt(s)
+ if (f >= 0.0) g= -g
+ h= f*g - s
+ u[i][i+1] = f-g;
+ for (j=l; j < n; j++) e[j]= u[i][j]/h
+ for (j=l; j < m; j++)
+ {
+ s=0.0
+ for (k=l; k < n; k++)
+ s += (u[j][k]*u[i][k])
+ for (k=l; k < n; k++)
+ u[j][k]+=s*e[k]
+ }
+ }
+ y= Math.abs(q[i])+Math.abs(e[i])
+ if (y>x)
+ x=y
+ }
+
+ // accumulation of right-hand transformations
+ for (i=n-1; i != -1; i+= -1)
+ {
+ if (g != 0.0)
+ {
+ h= g*u[i][i+1]
+ for (j=l; j < n; j++)
+ v[j][i]=u[i][j]/h
+ for (j=l; j < n; j++)
+ {
+ s=0.0
+ for (k=l; k < n; k++)
+ s += u[i][k]*v[k][j]
+ for (k=l; k < n; k++)
+ v[k][j]+=(s*v[k][i])
+ }
+ }
+ for (j=l; j < n; j++)
+ {
+ v[i][j] = 0;
+ v[j][i] = 0;
+ }
+ v[i][i] = 1;
+ g= e[i]
+ l= i
+ }
+
+ // accumulation of left hand transformations
+ for (i=n-1; i != -1; i+= -1)
+ {
+ l= i+1
+ g= q[i]
+ for (j=l; j < n; j++)
+ u[i][j] = 0;
+ if (g != 0.0)
+ {
+ h= u[i][i]*g
+ for (j=l; j < n; j++)
+ {
+ s=0.0
+ for (k=l; k < m; k++) s += u[k][i]*u[k][j];
+ f= s/h
+ for (k=i; k < m; k++) u[k][j]+=f*u[k][i];
+ }
+ for (j=i; j < m; j++) u[j][i] = u[j][i]/g;
+ }
+ else
+ for (j=i; j < m; j++) u[j][i] = 0;
+ u[i][i] += 1;
+ }
+
+ // diagonalization of the bidiagonal form
+ prec= prec*x
+ for (k=n-1; k != -1; k+= -1)
+ {
+ for (var iteration=0; iteration < itmax; iteration++)
+ { // test f splitting
+ var test_convergence = false
+ for (l=k; l != -1; l+= -1)
+ {
+ if (Math.abs(e[l]) <= prec)
+ { test_convergence= true
+ break
+ }
+ if (Math.abs(q[l-1]) <= prec)
+ break
+ }
+ if (!test_convergence)
+ { // cancellation of e[l] if l>0
+ c= 0.0
+ s= 1.0
+ var l1= l-1
+ for (i =l; i<k+1; i++)
+ {
+ f= s*e[i]
+ e[i]= c*e[i]
+ if (Math.abs(f) <= prec)
+ break
+ g= q[i]
+ h= pythag(f,g)
+ q[i]= h
+ c= g/h
+ s= -f/h
+ for (j=0; j < m; j++)
+ {
+ y= u[j][l1]
+ z= u[j][i]
+ u[j][l1] = y*c+(z*s)
+ u[j][i] = -y*s+(z*c)
+ }
+ }
+ }
+ // test for convergence
+ z= q[k]
+ if (l == k)
+ { // convergence
+ if (z<0.0)
+ { // q[k] is made non-negative
+ q[k]= -z
+ for (j=0; j < n; j++)
+ v[j][k] = -v[j][k]
+ }
+ break // break out of iteration loop and move on to next k value
+ }
+ if (iteration >= itmax-1)
+ throw 'Error: no convergence.'
+ // shift from bottom 2x2 minor
+ x= q[l]
+ y= q[k-1]
+ g= e[k-1]
+ h= e[k]
+ f= ((y-z)*(y+z)+(g-h)*(g+h))/(2.0*h*y)
+ g= pythag(f,1.0)
+ if (f < 0.0)
+ f= ((x-z)*(x+z)+h*(y/(f-g)-h))/x
+ else
+ f= ((x-z)*(x+z)+h*(y/(f+g)-h))/x
+ // next QR transformation
+ c= 1.0
+ s= 1.0
+ for (i=l+1; i< k+1; i++)
+ {
+ g= e[i]
+ y= q[i]
+ h= s*g
+ g= c*g
+ z= pythag(f,h)
+ e[i-1]= z
+ c= f/z
+ s= h/z
+ f= x*c+g*s
+ g= -x*s+g*c
+ h= y*s
+ y= y*c
+ for (j=0; j < n; j++)
+ {
+ x= v[j][i-1]
+ z= v[j][i]
+ v[j][i-1] = x*c+z*s
+ v[j][i] = -x*s+z*c
+ }
+ z= pythag(f,h)
+ q[i-1]= z
+ c= f/z
+ s= h/z
+ f= c*g+s*y
+ x= -s*g+c*y
+ for (j=0; j < m; j++)
+ {
+ y= u[j][i-1]
+ z= u[j][i]
+ u[j][i-1] = y*c+z*s
+ u[j][i] = -y*s+z*c
+ }
+ }
+ e[l]= 0.0
+ e[k]= f
+ q[k]= x
+ }
+ }
+
+ //vt= transpose(v)
+ //return (u,q,vt)
+ for (i=0; i<q.length; i++)
+ if (q[i] < prec) q[i] = 0
+
+ // sort singular values
+ for (i=0; i< n; i++)
+ {
+ for (j=i-1; j >= 0; j--)
+ {
+ if (q[j] < q[i])
+ {
+ // writeln(i,'-',j)
+ c = q[j]
+ q[j] = q[i]
+ q[i] = c
+ for(k=0;k<u.length;k++) { temp = u[k][i]; u[k][i] = u[k][j]; u[k][j] = temp; }
+ for(k=0;k<v.length;k++) { temp = v[k][i]; v[k][i] = v[k][j]; v[k][j] = temp; }
+ i = j
+ }
+ }
+ }
+ return {U:u,S:q,V:v}
+}
+
+// ... (the start of the bundled tfjs-core io_utils.js module, including the
+// signature and opening lines of encodeWeights, was lost in extraction)
+    const names = Array.isArray(tensors) ? tensors.map(tensor => tensor.name) :
+ Object.keys(tensors);
+ for (let i = 0; i < names.length; ++i) {
+ const name = names[i];
+ const t = Array.isArray(tensors) ? tensors[i].tensor : tensors[name];
+ if (t.dtype !== 'float32' && t.dtype !== 'int32' && t.dtype !== 'bool' &&
+ t.dtype !== 'string' && t.dtype !== 'complex64') {
+ throw new Error(`Unsupported dtype in weight '${name}': ${t.dtype}`);
+ }
+ const spec = { name, shape: t.shape, dtype: t.dtype };
+ if (t.dtype === 'string') {
+ const utf8bytes = new Promise(async (resolve) => {
+ const vals = await t.bytes();
+ const totalNumBytes = vals.reduce((p, c) => p + c.length, 0) +
+ NUM_BYTES_STRING_LENGTH * vals.length;
+ const bytes = new Uint8Array(totalNumBytes);
+ let offset = 0;
+ for (let i = 0; i < vals.length; i++) {
+ const val = vals[i];
+ const bytesOfLength = new Uint8Array(new Uint32Array([val.length]).buffer);
+ bytes.set(bytesOfLength, offset);
+ offset += NUM_BYTES_STRING_LENGTH;
+ bytes.set(val, offset);
+ offset += val.length;
+ }
+ resolve(bytes);
+ });
+ dataPromises.push(utf8bytes);
+ }
+ else {
+ dataPromises.push(t.data());
+ }
+ if (group != null) {
+ spec.group = group;
+ }
+ specs.push(spec);
+ }
+ const tensorValues = await Promise.all(dataPromises);
+ return { data: concatenateTypedArrays(tensorValues), specs };
+}
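+/**
+ * Usage sketch (illustrative; assumes a `tf` namespace from tfjs-core):
+ *
+ *   const {data, specs} = await encodeWeights(
+ *       {w: tf.tensor2d([[1,2],[3,4]]), b: tf.scalar(0)});
+ *   // specs: [{name:'w', shape:[2,2], dtype:'float32'},
+ *   //         {name:'b', shape:[], dtype:'float32'}]
+ *   // data: a 20-byte ArrayBuffer holding five float32 values
+ */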
+/**
+ * Decode flat ArrayBuffer as weights.
+ *
+ * This function does not handle sharding.
+ *
+ * This function is the reverse of `encodeWeights`.
+ *
+ * @param buffer A flat ArrayBuffer carrying the binary values of the tensors
+ * concatenated in the order specified in `specs`.
+ * @param specs Specifications of the names, dtypes and shapes of the tensors
+ * whose value are encoded by `buffer`.
+ * @return A map from tensor name to tensor value, with the names corresponding
+ * to names in `specs`.
+ * @throws Error, if any of the tensors has unsupported dtype.
+ */
+function decodeWeights(buffer, specs) {
+ // TODO(adarob, cais): Support quantization.
+ const out = {};
+ let float16Decode;
+ let offset = 0;
+ for (const spec of specs) {
+ const name = spec.name;
+ const dtype = spec.dtype;
+ const shape = spec.shape;
+ const size = Object(_util__WEBPACK_IMPORTED_MODULE_2__["sizeFromShape"])(shape);
+ let values;
+ if ('quantization' in spec) {
+ const quantization = spec.quantization;
+ if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {
+ if (!('min' in quantization && 'scale' in quantization)) {
+ throw new Error(`Weight ${spec.name} with quantization ${quantization.dtype} ` +
+ `doesn't have corresponding metadata min and scale.`);
+ }
+ }
+ else if (quantization.dtype === 'float16') {
+ if (dtype !== 'float32') {
+ throw new Error(`Weight ${spec.name} is quantized with ${quantization.dtype} ` +
+ `which only supports weights of type float32 not ${dtype}.`);
+ }
+ }
+ else {
+ throw new Error(`Weight ${spec.name} has unknown ` +
+ `quantization dtype ${quantization.dtype}. ` +
+ `Supported quantization dtypes are: ` +
+ `'uint8', 'uint16', and 'float16'.`);
+ }
+ const quantizationSizeFactor = _types__WEBPACK_IMPORTED_MODULE_3__[/* DTYPE_VALUE_SIZE_MAP */ "a"][quantization.dtype];
+ const byteBuffer = buffer.slice(offset, offset + size * quantizationSizeFactor);
+ const quantizedArray = (quantization.dtype === 'uint8') ?
+ new Uint8Array(byteBuffer) :
+ new Uint16Array(byteBuffer);
+ if (dtype === 'float32') {
+ if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {
+ values = new Float32Array(quantizedArray.length);
+ for (let i = 0; i < quantizedArray.length; i++) {
+ const v = quantizedArray[i];
+ values[i] = v * quantization.scale + quantization.min;
+ }
+ }
+ else if (quantization.dtype === 'float16') {
+ if (float16Decode === undefined) {
+ float16Decode = getFloat16Decoder();
+ }
+ values = float16Decode(quantizedArray);
+ }
+ else {
+ throw new Error(`Unsupported quantization type ${quantization.dtype} ` +
+ `for weight type float32.`);
+ }
+ }
+ else if (dtype === 'int32') {
+ if (quantization.dtype !== 'uint8' && quantization.dtype !== 'uint16') {
+ throw new Error(`Unsupported quantization type ${quantization.dtype} ` +
+ `for weight type int32.`);
+ }
+ values = new Int32Array(quantizedArray.length);
+ for (let i = 0; i < quantizedArray.length; i++) {
+ const v = quantizedArray[i];
+ values[i] = Math.round(v * quantization.scale + quantization.min);
+ }
+ }
+ else {
+ throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);
+ }
+ offset += size * quantizationSizeFactor;
+ }
+ else if (dtype === 'string') {
+ const size = Object(_util__WEBPACK_IMPORTED_MODULE_2__["sizeFromShape"])(spec.shape);
+ values = [];
+ for (let i = 0; i < size; i++) {
+ const byteLength = new Uint32Array(buffer.slice(offset, offset + NUM_BYTES_STRING_LENGTH))[0];
+ offset += NUM_BYTES_STRING_LENGTH;
+ const bytes = new Uint8Array(buffer.slice(offset, offset + byteLength));
+ values.push(bytes);
+ offset += byteLength;
+ }
+ }
+ else {
+ const dtypeFactor = _types__WEBPACK_IMPORTED_MODULE_3__[/* DTYPE_VALUE_SIZE_MAP */ "a"][dtype];
+ const byteBuffer = buffer.slice(offset, offset + size * dtypeFactor);
+ if (dtype === 'float32') {
+ values = new Float32Array(byteBuffer);
+ }
+ else if (dtype === 'int32') {
+ values = new Int32Array(byteBuffer);
+ }
+ else if (dtype === 'bool') {
+ values = new Uint8Array(byteBuffer);
+ }
+ else if (dtype === 'complex64') {
+ values = new Float32Array(byteBuffer);
+ const real = new Float32Array(values.length / 2);
+ const image = new Float32Array(values.length / 2);
+ for (let i = 0; i < real.length; i++) {
+ real[i] = values[i * 2];
+ image[i] = values[i * 2 + 1];
+ }
+ const realTensor = Object(_ops_tensor_ops__WEBPACK_IMPORTED_MODULE_1__[/* tensor */ "f"])(real, shape, 'float32');
+ const imageTensor = Object(_ops_tensor_ops__WEBPACK_IMPORTED_MODULE_1__[/* tensor */ "f"])(image, shape, 'float32');
+ out[name] = Object(_ops_complex__WEBPACK_IMPORTED_MODULE_0__[/* complex */ "a"])(realTensor, imageTensor);
+ }
+ else {
+ throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);
+ }
+ offset += size * dtypeFactor;
+ }
+ if (dtype !== 'complex64') {
+ out[name] = Object(_ops_tensor_ops__WEBPACK_IMPORTED_MODULE_1__[/* tensor */ "f"])(values, shape, dtype);
+ }
+ }
+ return out;
+}
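+// Usage sketch (illustrative): the reverse of the encodeWeights example above.
+//
+//   const tensors = decodeWeights(data, specs);
+//   // tensors.w is a float32 tensor of shape [2,2]; tensors.b is a scalar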
+/**
+ * Concatenate TypedArrays into an ArrayBuffer.
+ */
+function concatenateTypedArrays(xs) {
+ // TODO(adarob, cais): Support quantization.
+ if (xs === null) {
+ throw new Error(`Invalid input value: ${JSON.stringify(xs)}`);
+ }
+ let totalByteLength = 0;
+ // `normalizedXs` is here for this reason: a `TypedArray`'s `buffer`
+ // can have a different byte length from that of the `TypedArray` itself,
+ // for example, when the `TypedArray` is created from an offset in an
+ // `ArrayBuffer`. `normalizedXs` holds `TypedArray`s whose `buffer`s match
+ // the `TypedArray` in byte length. If an element of `xs` does not show
+ // this property, a new `TypedArray` that satisfies this property will be
+ // constructed and pushed into `normalizedXs`.
+ const normalizedXs = [];
+ xs.forEach((x) => {
+ totalByteLength += x.byteLength;
+ // tslint:disable:no-any
+ normalizedXs.push(x.byteLength === x.buffer.byteLength ? x :
+ new x.constructor(x));
+ if (!(x instanceof Float32Array || x instanceof Int32Array ||
+ x instanceof Uint8Array)) {
+ throw new Error(`Unsupported TypedArray subtype: ${x.constructor.name}`);
+ }
+ // tslint:enable:no-any
+ });
+ const y = new Uint8Array(totalByteLength);
+ let offset = 0;
+ normalizedXs.forEach((x) => {
+ y.set(new Uint8Array(x.buffer), offset);
+ offset += x.byteLength;
+ });
+ return y.buffer;
+}
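+// For example (illustrative):
+//
+//   concatenateTypedArrays([new Float32Array([1]), new Int32Array([2])]);
+//   // returns an 8-byte ArrayBuffer: 4 bytes of float32, then 4 bytes of int32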
+// Use Buffer on Node.js instead of Blob/atob/btoa
+const useNodeBuffer = typeof Buffer !== 'undefined' &&
+ (typeof Blob === 'undefined' || typeof atob === 'undefined' ||
+ typeof btoa === 'undefined');
+/**
+ * Calculate the byte length of a JavaScript string.
+ *
+ * Note that a JavaScript string can contain wide characters, therefore the
+ * length of the string is not necessarily equal to the byte length.
+ *
+ * @param str Input string.
+ * @returns Byte length.
+ */
+function stringByteLength(str) {
+ if (useNodeBuffer) {
+ return Buffer.byteLength(str);
+ }
+ return new Blob([str]).size;
+}
+/**
+ * Encode an ArrayBuffer as a base64 encoded string.
+ *
+ * @param buffer `ArrayBuffer` to be converted.
+ * @returns A string that base64-encodes `buffer`.
+ */
+function arrayBufferToBase64String(buffer) {
+ if (useNodeBuffer) {
+ return Buffer.from(buffer).toString('base64');
+ }
+ const buf = new Uint8Array(buffer);
+ let s = '';
+ for (let i = 0, l = buf.length; i < l; i++) {
+ s += String.fromCharCode(buf[i]);
+ }
+ return btoa(s);
+}
+/**
+ * Decode a base64 string as an ArrayBuffer.
+ *
+ * @param str Base64 string.
+ * @returns Decoded `ArrayBuffer`.
+ */
+function base64StringToArrayBuffer(str) {
+ if (useNodeBuffer) {
+ const buf = Buffer.from(str, 'base64');
+ return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);
+ }
+ const s = atob(str);
+ const buffer = new Uint8Array(s.length);
+ for (let i = 0; i < s.length; ++i) {
+ buffer.set([s.charCodeAt(i)], i);
+ }
+ return buffer.buffer;
+}
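+// Round-trip sketch (illustrative):
+//
+//   const s = arrayBufferToBase64String(new Uint8Array([1,2,3]).buffer); // 'AQID'
+//   new Uint8Array(base64StringToArrayBuffer(s));                        // [1,2,3]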
+/**
+ * Concatenate a number of ArrayBuffers into one.
+ *
+ * @param buffers A number of array buffers to concatenate.
+ * @returns Result of concatenating `buffers` in order.
+ */
+function concatenateArrayBuffers(buffers) {
+ if (buffers.length === 1) {
+ return buffers[0];
+ }
+ let totalByteLength = 0;
+ buffers.forEach((buffer) => {
+ totalByteLength += buffer.byteLength;
+ });
+ const temp = new Uint8Array(totalByteLength);
+ let offset = 0;
+ buffers.forEach((buffer) => {
+ temp.set(new Uint8Array(buffer), offset);
+ offset += buffer.byteLength;
+ });
+ return temp.buffer;
+}
+/**
+ * Get the basename of a path.
+ *
+ * Behaves in a way analogous to Linux's basename command.
+ *
+ * @param path
+ */
+function basename(path) {
+ const SEPARATOR = '/';
+ path = path.trim();
+ while (path.endsWith(SEPARATOR)) {
+ path = path.slice(0, path.length - 1);
+ }
+ const items = path.split(SEPARATOR);
+ return items[items.length - 1];
+}
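+// For example (illustrative):
+//
+//   basename('a/b/weights.bin');   // 'weights.bin'
+//   basename('a/b/');              // 'b' (trailing separators are stripped)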
+/**
+ * Populate ModelArtifactsInfo fields for a model with JSON topology.
+ * @param modelArtifacts
+ * @returns A ModelArtifactsInfo object.
+ */
+function getModelArtifactsInfoForJSON(modelArtifacts) {
+ if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
+ throw new Error('Expected JSON model topology, received ArrayBuffer.');
+ }
+ return {
+ dateSaved: new Date(),
+ modelTopologyType: 'JSON',
+ modelTopologyBytes: modelArtifacts.modelTopology == null ?
+ 0 :
+ stringByteLength(JSON.stringify(modelArtifacts.modelTopology)),
+ weightSpecsBytes: modelArtifacts.weightSpecs == null ?
+ 0 :
+ stringByteLength(JSON.stringify(modelArtifacts.weightSpecs)),
+ weightDataBytes: modelArtifacts.weightData == null ?
+ 0 :
+ modelArtifacts.weightData.byteLength,
+ };
+}
+/**
+ * Computes the mantissa table for casting Float16 to Float32
+ * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf
+ *
+ * @returns Uint32Array, 2048 mantissa lookup values.
+ */
+function computeFloat16MantisaTable() {
+ const convertMantissa = (i) => {
+ let m = i << 13;
+ let e = 0;
+ while ((m & 0x00800000) === 0) {
+ e -= 0x00800000;
+ m <<= 1;
+ }
+ m &= ~0x00800000;
+ e += 0x38800000;
+ return m | e;
+ };
+ const mantisaTable = new Uint32Array(2048);
+ mantisaTable[0] = 0;
+ for (let i = 1; i < 1024; i++) {
+ mantisaTable[i] = convertMantissa(i);
+ }
+ for (let i = 1024; i < 2048; i++) {
+ mantisaTable[i] = 0x38000000 + ((i - 1024) << 13);
+ }
+ return mantisaTable;
+}
+/**
+ * Computes exponent table for casting Float16 to Float32
+ * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf
+ *
+ * @returns Uint32Array, 64 exponent lookup values.
+ */
+function computeFloat16ExponentTable() {
+ const exponentTable = new Uint32Array(64);
+ exponentTable[0] = 0;
+ exponentTable[31] = 0x47800000;
+ exponentTable[32] = 0x80000000;
+ exponentTable[63] = 0xc7800000;
+ for (let i = 1; i < 31; i++) {
+ exponentTable[i] = i << 23;
+ }
+ for (let i = 33; i < 63; i++) {
+ exponentTable[i] = 0x80000000 + ((i - 32) << 23);
+ }
+ return exponentTable;
+}
+/**
+ * Computes offset table for casting Float16 to Float32
+ * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf
+ *
+ * @returns Uint32Array, 64 offset values.
+ */
+function computeFloat16OffsetTable() {
+ const offsetTable = new Uint32Array(64);
+ for (let i = 0; i < 64; i++) {
+ offsetTable[i] = 1024;
+ }
+ offsetTable[0] = offsetTable[32] = 0;
+ return offsetTable;
+}
+/**
+ * Retrieve a Float16 decoder which will decode a ByteArray of Float16 values
+ * to a Float32Array.
+ *
+ * @returns Function (buffer: Uint16Array) => Float32Array which decodes
+ * the Uint16Array of Float16 bytes to a Float32Array.
+ */
+function getFloat16Decoder() {
+ // Algorithm is based off of http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf
+ // Cache lookup tables
+ const mantisaTable = computeFloat16MantisaTable();
+ const exponentTable = computeFloat16ExponentTable();
+ const offsetTable = computeFloat16OffsetTable();
+ return (quantizedArray) => {
+ const buffer = new ArrayBuffer(4 * quantizedArray.length);
+ const bufferUint32View = new Uint32Array(buffer);
+ for (let index = 0; index < quantizedArray.length; index++) {
+ const float16Bits = quantizedArray[index];
+ const float32Bits = mantisaTable[offsetTable[float16Bits >> 10] + (float16Bits & 0x3ff)] +
+ exponentTable[float16Bits >> 10];
+ bufferUint32View[index] = float32Bits;
+ }
+ return new Float32Array(buffer);
+ };
+}
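+// Usage sketch (illustrative): 0x3c00 is 1.0 in IEEE float16 and 0xc000 is -2.0.
+//
+//   const decode = getFloat16Decoder();
+//   decode(new Uint16Array([0x3c00, 0xc000]));   // Float32Array [1, -2]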
+//# sourceMappingURL=io_utils.js.map
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(39).Buffer))
+
+/***/ }),
+/* 14 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+
+// EXPORTS
+__webpack_require__.d(__webpack_exports__, "f", function() { return /* binding */ iteratorFromItems; });
+__webpack_require__.d(__webpack_exports__, "e", function() { return /* binding */ iteratorFromFunction; });
+__webpack_require__.d(__webpack_exports__, "d", function() { return /* binding */ iteratorFromConcatenated; });
+__webpack_require__.d(__webpack_exports__, "g", function() { return /* binding */ iteratorFromZipped; });
+__webpack_require__.d(__webpack_exports__, "a", function() { return /* binding */ lazy_iterator_LazyIterator; });
+__webpack_require__.d(__webpack_exports__, "b", function() { return /* binding */ lazy_iterator_OneToManyIterator; });
+__webpack_require__.d(__webpack_exports__, "c", function() { return /* binding */ ZipMismatchMode; });
+
+// UNUSED EXPORTS: iteratorFromIncrementing, iteratorFromConcatenatedFunction, ChainedIterator, PrefetchIterator, ShuffleIterator
+
+// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/index.js + 269 modules
+var dist = __webpack_require__(0);
+
+// EXTERNAL MODULE: ./node_modules/seedrandom/index.js
+var seedrandom = __webpack_require__(20);
+
+// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-data/dist/util/deep_map.js
+var deep_map = __webpack_require__(19);
+
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-data/dist/util/deep_clone.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * =============================================================================
+ */
+
+
+function deepClone(container) {
+ return Object(deep_map["b" /* deepMap */])(container, cloneIfTensor);
+}
+// tslint:disable-next-line: no-any
+function cloneIfTensor(item) {
+ if (item instanceof dist["Tensor"]) {
+ return ({ value: item.clone(), recurse: false });
+ }
+ else if (Object(deep_map["e" /* isIterable */])(item)) {
+ return { value: null, recurse: true };
+ }
+ else {
+ return { value: item, recurse: false };
+ }
+}
+//# sourceMappingURL=deep_clone.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-data/dist/util/ring_buffer.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * =============================================================================
+ */
+/**
+ * A ring buffer, providing O(1) FIFO, LIFO, and related operations.
+ */
+class RingBuffer {
+ /**
+ * Constructs a `RingBuffer`.
+ * @param capacity The number of items that the buffer can accommodate.
+ */
+ constructor(capacity) {
+ this.capacity = capacity;
+ // Note we store the indices in the range 0 <= index < 2*capacity.
+ // This allows us to distinguish the full from the empty case.
+ // See https://www.snellman.net/blog/archive/2016-12-13-ring-buffers/
+ this.begin = 0; // inclusive
+ this.end = 0; // exclusive
+ if (capacity == null) {
+ throw new RangeError('Can\'t create a ring buffer of unknown capacity.');
+ }
+ if (capacity < 1) {
+ throw new RangeError('Can\'t create ring buffer of capacity < 1.');
+ }
+ this.data = new Array(capacity);
+ this.doubledCapacity = 2 * capacity;
+ }
+ /**
+ * Map any index into the range 0 <= index < 2*capacity.
+ */
+ wrap(index) {
+ // don't trust % on negative numbers
+ while (index < 0) {
+ index += this.doubledCapacity;
+ }
+ return index % this.doubledCapacity;
+ }
+ get(index) {
+ if (index < 0) {
+ throw new RangeError('Can\'t get item at a negative index.');
+ }
+ return this.data[index % this.capacity];
+ }
+ set(index, value) {
+ if (index < 0) {
+ throw new RangeError('Can\'t set item at a negative index.');
+ }
+ this.data[index % this.capacity] = value;
+ }
+ /**
+ * Returns the current number of items in the buffer.
+ */
+ length() {
+ let length = this.end - this.begin;
+ if (length < 0) {
+ length = this.doubledCapacity + length;
+ }
+ return length;
+ }
+ /**
+ * Reports whether the buffer is full.
+ * @returns true if the number of items in the buffer equals its capacity, and
+ * false otherwise.
+ */
+ isFull() {
+ return this.length() === this.capacity;
+ }
+ /**
+ * Reports whether the buffer is empty.
+ * @returns true if the number of items in the buffer equals zero, and
+ * false otherwise.
+ */
+ isEmpty() {
+ return this.length() === 0;
+ }
+ /**
+ * Adds an item to the end of the buffer.
+ */
+ push(value) {
+ if (this.isFull()) {
+ throw new RangeError('Ring buffer is full.');
+ }
+ this.set(this.end, value);
+ this.end = this.wrap(this.end + 1);
+ }
+ /**
+ * Adds many items to the end of the buffer, in order.
+ */
+ pushAll(values) {
+ for (const value of values) {
+ this.push(value);
+ }
+ }
+ /**
+ * Removes and returns the last item in the buffer.
+ */
+ pop() {
+ if (this.isEmpty()) {
+ throw new RangeError('Ring buffer is empty.');
+ }
+ this.end = this.wrap(this.end - 1);
+ const result = this.get(this.end);
+ this.set(this.end, undefined);
+ return result;
+ }
+ /**
+ * Adds an item to the beginning of the buffer.
+ */
+ unshift(value) {
+ if (this.isFull()) {
+ throw new RangeError('Ring buffer is full.');
+ }
+ this.begin = this.wrap(this.begin - 1);
+ this.set(this.begin, value);
+ }
+ /**
+ * Removes and returns the first item in the buffer.
+ */
+ shift() {
+ if (this.isEmpty()) {
+ throw new RangeError('Ring buffer is empty.');
+ }
+ const result = this.get(this.begin);
+ this.set(this.begin, undefined);
+ this.begin = this.wrap(this.begin + 1);
+ return result;
+ }
+ /**
+ * Removes and returns a specific item in the buffer, and moves the last item
+ * to the vacated slot. This is useful for implementing a shuffling stream.
+ * Note that this operation necessarily scrambles the original order.
+ *
+ * @param relativeIndex: the index of the item to remove, relative to the
+ * first item in the buffer (e.g., hiding the ring nature of the underlying
+ * storage).
+ */
+ shuffleExcise(relativeIndex) {
+ if (this.isEmpty()) {
+ throw new RangeError('Ring buffer is empty.');
+ }
+ const index = this.wrap(this.begin + relativeIndex);
+ const result = this.get(index);
+ this.set(index, this.pop());
+ return result;
+ }
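+ // For example, if the buffer logically holds [a, b, c, d], then
+ // shuffleExcise(1) returns 'b' and leaves [a, d, c]: pop() removes the
+ // last item 'd', which is then written into the vacated slot.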
+}
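+// A minimal usage sketch (editor's illustration; `RingBuffer` is an
+// internal tfjs-data utility, not part of the public API):
+//
+//   const buf = new RingBuffer(3);
+//   buf.push('a');            // append at the end
+//   buf.push('b');
+//   buf.unshift('z');         // prepend at the beginning
+//   console.log(buf.shift()); // 'z' (removes the first item)
+//   console.log(buf.pop());   // 'b' (removes the last item)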
+//# sourceMappingURL=ring_buffer.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-data/dist/util/growing_ring_buffer.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * =============================================================================
+ */
+
+class growing_ring_buffer_GrowingRingBuffer extends RingBuffer {
+ /**
+ * Constructs a `GrowingRingBuffer`.
+ */
+ constructor() {
+ super(growing_ring_buffer_GrowingRingBuffer.INITIAL_CAPACITY);
+ }
+ isFull() {
+ return false;
+ }
+ push(value) {
+ if (super.isFull()) {
+ this.expand();
+ }
+ super.push(value);
+ }
+ unshift(value) {
+ if (super.isFull()) {
+ this.expand();
+ }
+ super.unshift(value);
+ }
+ /**
+ * Doubles the capacity of the buffer.
+ */
+ expand() {
+ const newCapacity = this.capacity * 2;
+ const newData = new Array(newCapacity);
+ const len = this.length();
+ // Rotate the buffer to start at index 0 again, since we can't just
+ // allocate more space at the end.
+ for (let i = 0; i < len; i++) {
+ newData[i] = this.get(this.wrap(this.begin + i));
+ }
+ this.data = newData;
+ this.capacity = newCapacity;
+ this.doubledCapacity = 2 * this.capacity;
+ this.begin = 0;
+ this.end = len;
+ }
+}
+growing_ring_buffer_GrowingRingBuffer.INITIAL_CAPACITY = 32;
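+// A minimal growth sketch (editor's illustration; like `RingBuffer`, this
+// class is internal to tfjs-data):
+//
+//   const buf = new growing_ring_buffer_GrowingRingBuffer(); // capacity 32
+//   for (let i = 0; i < 100; i++) buf.push(i); // expands 32 -> 64 -> 128
+//   console.log(buf.length()); // 100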
+//# sourceMappingURL=growing_ring_buffer.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-data/dist/iterators/lazy_iterator.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * =============================================================================
+ */
+
+
+
+
+
+
+// Here we implement a simple asynchronous iterator.
+// This lets us avoid using either third-party stream libraries or
+// recent TypeScript language support requiring polyfills.
+/**
+ * Create a `LazyIterator` from an array of items.
+ */
+function iteratorFromItems(items) {
+ return new lazy_iterator_ArrayIterator(items);
+}
+/**
+ * Create a `LazyIterator` of incrementing integers.
+ */
+function iteratorFromIncrementing(start) {
+ let i = start;
+ return iteratorFromFunction(() => ({ value: i++, done: false }));
+}
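+// For example (editor's sketch): the incrementing iterator is unbounded,
+// so bound it before collecting it:
+//
+//   const firstThree = await iteratorFromIncrementing(0).take(3).toArray();
+//   console.log(firstThree); // [0, 1, 2]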
+/**
+ * Create a `LazyIterator` from a function.
+ *
+ * ```js
+ * let i = -1;
+ * const func = () =>
+ * ++i < 5 ? {value: i, done: false} : {value: null, done: true};
+ * const iter = tf.data.iteratorFromFunction(func);
+ * await iter.forEachAsync(e => console.log(e));
+ * ```
+ *
+ * @param func A function that produces data on each call.
+ */
+function iteratorFromFunction(func) {
+ return new FunctionCallIterator(func);
+}
+/**
+ * Create a `LazyIterator` by concatenating underlying streams, which are
+ * themselves provided as a stream.
+ *
+ * This can also be thought of as a "stream flatten" operation.
+ *
+ * @param baseIterators A stream of streams to be concatenated.
+ * @param baseErrorHandler An optional function that can intercept `Error`s
+ * raised during a `next()` call on the base stream. This function can decide
+ * whether the error should be propagated, whether the error should be
+ * ignored, or whether the base stream should be terminated.
+ */
+function iteratorFromConcatenated(baseIterators, baseErrorHandler) {
+ return new ChainedIterator(baseIterators, baseErrorHandler);
+}
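+// A minimal concatenation sketch (editor's illustration), using the
+// `concatenate` method below, which wraps this same ChainedIterator:
+//
+//   const a = iteratorFromItems([1, 2]);
+//   const b = iteratorFromItems([3]);
+//   console.log(await a.concatenate(b).toArray()); // [1, 2, 3]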
+/**
+ * Create a `LazyIterator` by concatenating streams produced by calling a
+ * stream-generating function a given number of times.
+ *
+ * Since a `LazyIterator` is read-once, it cannot be repeated, but this
+ * function can be used to achieve a similar effect:
+ *
+ * iteratorFromConcatenatedFunction(() => new MyIterator(), 6);
+ *
+ * @param iteratorFunc: A function that produces a new stream on each call.
+ * @param count: The number of times to call the function.
+ * @param baseErrorHandler An optional function that can intercept `Error`s
+ * raised during a `next()` call on the base stream. This function can decide
+ * whether the error should be propagated, whether the error should be
+ * ignored, or whether the base stream should be terminated.
+ */
+function iteratorFromConcatenatedFunction(iteratorFunc, count, baseErrorHandler) {
+ return iteratorFromConcatenated(iteratorFromFunction(iteratorFunc).take(count), baseErrorHandler);
+}
+/**
+ * Create a `LazyIterator` by zipping together an array, dict, or nested
+ * structure of `LazyIterator`s (and perhaps additional constants).
+ *
+ * The underlying streams must provide elements in a consistent order such
+ * that they correspond.
+ *
+ * Typically, the underlying streams should have the same number of
+ * elements. If they do not, the behavior is determined by the
+ * `mismatchMode` argument.
+ *
+ * The nested structure of the `iterators` argument determines the
+ * structure of elements in the resulting iterator.
+ *
+ * @param iterators: An array or object containing LazyIterators at the
+ * leaves.
+ * @param mismatchMode: Determines what to do when one underlying iterator
+ * is exhausted before the others. `ZipMismatchMode.FAIL` (the default)
+ * causes an error to be thrown in this case. `ZipMismatchMode.SHORTEST`
+ * causes the zipped iterator to terminate when the first underlying
+ * stream is exhausted, so elements remaining on the longer streams are
+ * ignored.
+ * `ZipMismatchMode.LONGEST` causes the zipped stream to continue, filling
+ * in nulls for the exhausted streams, until all streams are exhausted.
+ */
+function iteratorFromZipped(iterators, mismatchMode = ZipMismatchMode.FAIL) {
+ return new lazy_iterator_ZipIterator(iterators, mismatchMode);
+}
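+// A minimal zip sketch (editor's illustration):
+//
+//   const zipped = iteratorFromZipped({
+//     x: iteratorFromItems([1, 2, 3]),
+//     y: iteratorFromItems(['a', 'b', 'c'])
+//   });
+//   console.log(await zipped.toArray());
+//   // [{x: 1, y: 'a'}, {x: 2, y: 'b'}, {x: 3, y: 'c'}]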
+/**
+ * An asynchronous iterator, providing lazy access to a potentially
+ * unbounded stream of elements.
+ *
+ * Iterator can be obtained from a dataset:
+ * `const iter = await dataset.iterator();`
+ */
+class lazy_iterator_LazyIterator {
+ /**
+ * Collect all remaining elements of a bounded stream into an array.
+ * Obviously this will succeed only for small streams that fit in memory.
+ * Useful for testing.
+ *
+ * @returns A Promise for an array of stream elements, which will resolve
+ * when the stream is exhausted.
+ */
+ async toArray() {
+ const result = [];
+ let x = await this.next();
+ while (!x.done) {
+ result.push(x.value);
+ x = await this.next();
+ }
+ return result;
+ }
+ /**
+ * Collect all elements of this dataset into an array, prefetching 100
+ * elements ahead. This is useful for testing, because the prefetch changes the
+ * order in which the Promises are resolved along the processing pipeline.
+ * This may help expose bugs where results are dependent on the order of
+ * Promise resolution rather than on the logical order of the stream (i.e.,
+ * due to hidden mutable state).
+ *
+ * @returns A Promise for an array of stream elements, which will resolve
+ * when the stream is exhausted.
+ */
+ async toArrayForTest() {
+ const stream = this.prefetch(100);
+ const result = [];
+ let x = await stream.next();
+ while (!x.done) {
+ result.push(x.value);
+ x = await stream.next();
+ }
+ return result;
+ }
+ /**
+ * Draw items from the stream until it is exhausted.
+ *
+ * This can be useful when the stream has side effects but no output. In
+ * that case, calling this function guarantees that the stream will be
+ * fully processed.
+ */
+ async resolveFully() {
+ let x = await this.next();
+ while (!x.done) {
+ x = await this.next();
+ }
+ }
+ /**
+ * Draw items from the stream until it is exhausted, or a predicate fails.
+ *
+ * This can be useful when the stream has side effects but no output. In
+ * that case, calling this function processes the stream until it is
+ * exhausted or the predicate fails.
+ */
+ async resolveWhile(predicate) {
+ let x = await this.next();
+ let shouldContinue = predicate(x.value);
+ while ((!x.done) && shouldContinue) {
+ x = await this.next();
+ shouldContinue = predicate(x.value);
+ }
+ }
+ /**
+ * Handles errors thrown on this stream using a provided handler function.
+ *
+ * @param handler A function that handles any `Error` thrown during a `next()`
+ * call and returns true if the stream should continue (dropping the failed
+ * call) or false if the stream should quietly terminate. If the handler
+ * itself throws (or rethrows) an `Error`, that will be propagated.
+ *
+ * @returns A `LazyIterator` of elements passed through from upstream,
+ * possibly filtering or terminating on upstream `next()` calls that
+ * throw an `Error`.
+ */
+ handleErrors(handler) {
+ return new ErrorHandlingLazyIterator(this, handler);
+ }
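+ // For example (editor's sketch): skip elements whose reads fail with a
+ // known, recoverable error, and rethrow anything unexpected:
+ //
+ //   upstream.handleErrors(e => {
+ //     if (e.message.includes('parse error')) return true; // drop and go on
+ //     throw e; // propagate anything else
+ //   });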
+ // TODO(soergel): Implement reduce() etc.
+ /**
+ * Filters this stream according to `predicate`.
+ *
+ * @param predicate A function mapping a stream element to a boolean or a
+ * `Promise` for one.
+ *
+ * @returns A `LazyIterator` of elements for which the predicate was true.
+ */
+ filter(predicate) {
+ return new lazy_iterator_FilterIterator(this, predicate);
+ }
+ /**
+ * Maps this stream through a 1-to-1 transform.
+ *
+ * @param transform A function mapping a stream element to a transformed
+ * element.
+ *
+ * @returns A `LazyIterator` of transformed elements.
+ */
+ map(transform) {
+ return new lazy_iterator_MapIterator(this, transform);
+ }
+ /**
+ * Maps this stream through an async 1-to-1 transform.
+ *
+ * @param transform A function mapping a stream element to a `Promise` for a
+ * transformed stream element.
+ *
+ * @returns A `LazyIterator` of transformed elements.
+ */
+ mapAsync(transform) {
+ return new lazy_iterator_AsyncMapIterator(this, transform);
+ }
+ /**
+ * Maps this stream through a 1-to-1 transform, forcing serial execution.
+ *
+ * @param transform A function mapping a stream element to a transformed
+ * element.
+ *
+ * @returns A `LazyIterator` of transformed elements.
+ */
+ serialMapAsync(transform) {
+ return new lazy_iterator_AsyncMapIterator(this, transform).serial();
+ }
+ /**
+ * Maps this stream through a 1-to-many transform.
+ *
+ * @param transform A function mapping a stream element to an array of
+ * transformed elements.
+ *
+ * @returns A `LazyIterator` of transformed elements.
+ */
+ flatmap(transform) {
+ return new lazy_iterator_FlatmapIterator(this, transform);
+ }
+ /**
+ * Apply a function to every element of the stream.
+ *
+ * @param f A function to apply to each stream element.
+ */
+ async forEachAsync(f) {
+ return this.map(f).resolveFully();
+ }
+ /**
+ * Apply a function to every element of the stream, forcing serial execution.
+ *
+ * @param f A function to apply to each stream element. Should return 'true'
+ * to indicate that the stream should continue, or 'false' to cause it to
+ * terminate.
+ */
+ async serialForEach(f) {
+ return this.serialMapAsync(f).resolveWhile(x => (x === true));
+ }
+ /**
+ * Groups elements into batches, represented as arrays of elements.
+ *
+ * We can think of the elements of this iterator as 'rows' (even if they are
+ * nested structures). By the same token, consecutive values for a given
+ * key within the elements form a 'column'. This matches the usual sense of
+ * 'row' and 'column' when processing tabular data (e.g., parsing a CSV).
+ *
+ * Thus, "Row-major" means that the resulting batch is simply a collection of
+ * rows: `[row1, row2, row3, ...]`. This is in contrast to the column-major
+ * form, which is needed for vectorized computation.
+ *
+ * @param batchSize The number of elements desired per batch.
+ * @param smallLastBatch Whether to emit the final batch when it has fewer
+ * than batchSize elements. Default true.
+ * @returns A `LazyIterator` of batches of elements, represented as arrays
+ * of the original element type.
+ */
+ rowMajorBatch(batchSize, smallLastBatch = true) {
+ return new RowMajorBatchIterator(this, batchSize, smallLastBatch);
+ }
+ /**
+ * Groups elements into batches, represented in column-major form.
+ *
+ * We can think of the elements of this iterator as 'rows' (even if they are
+ * nested structures). By the same token, consecutive values for a given
+ * key within the elements form a 'column'. This matches the usual sense of
+ * 'row' and 'column' when processing tabular data (e.g., parsing a CSV).
+ *
+ * Thus, "column-major" means that the resulting batch is a (potentially
+ * nested) structure representing the columns. Each column entry, then,
+ * contains a collection of the values found in that column for a range of
+ * input elements. This representation allows for vectorized computation, in
+ * contrast to the row-major form.
+ *
+ * The inputs should all have the same nested structure (i.e., of arrays and
+ * dicts). The result is a single object with the same nested structure,
+ * where the leaves are arrays collecting the values of the inputs at that
+ * location (or, optionally, the result of a custom function applied to those
+ * arrays).
+ *
+ * @param batchSize The number of elements desired per batch.
+ * @param smallLastBatch Whether to emit the final batch when it has fewer
+ * than batchSize elements. Default true.
+ * @param zipFn: (optional) A function that expects an array of elements at a
+ * single node of the object tree, and returns a `DeepMapResult`. The
+ * `DeepMapResult` either provides a result value for that node (i.e.,
+ * representing the subtree), or indicates that the node should be processed
+ * recursively. The default zipFn recurses as far as possible and places
+ * arrays at the leaves.
+ * @returns A `LazyIterator` of batches of elements, represented as an object
+ * with collections at the leaves.
+ */
+ columnMajorBatch(batchSize, smallLastBatch = true,
+ // tslint:disable-next-line:no-any
+ zipFn = deep_map["f" /* zipToList */]) {
+ // First collect the desired number of input elements as a row-major batch.
+ const rowBatches = this.rowMajorBatch(batchSize, smallLastBatch);
+ // Now 'rotate' or 'pivot' the data, collecting all values from each column
+ // in the batch (i.e., for each key within the elements) into an array.
+ return rowBatches.map(x => Object(deep_map["d" /* deepZip */])(x, zipFn));
+ }
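+ // A worked sketch of the two layouts (editor's illustration): given the
+ // elements {a: 1, b: 10}, {a: 2, b: 20}, {a: 3, b: 30} and batchSize 2,
+ //   rowMajorBatch    yields [{a: 1, b: 10}, {a: 2, b: 20}], then [{a: 3, b: 30}]
+ //   columnMajorBatch yields {a: [1, 2], b: [10, 20]}, then {a: [3], b: [30]}
+ // (with the default zipFn, which places plain arrays at the leaves).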
+ /**
+ * Concatenate this `LazyIterator` with another.
+ *
+ * @param iterator A `LazyIterator` to be concatenated onto this one.
+ * @param baseErrorHandler An optional function that can intercept `Error`s
+ * raised during a `next()` call on the base stream. This function can
+ * decide whether the error should be propagated, whether the error should
+ * be ignored, or whether the base stream should be terminated.
+ * @returns A `LazyIterator`.
+ */
+ concatenate(iterator, baseErrorHandler) {
+ return new ChainedIterator(iteratorFromItems([this, iterator]), baseErrorHandler);
+ }
+ /**
+ * Limits this stream to return at most `count` items.
+ *
+ * @param count The maximum number of items to provide from the stream. If
+ * a negative or undefined value is given, the entire stream is returned
+ * unaltered.
+ */
+ take(count) {
+ if (count < 0 || count == null) {
+ return this;
+ }
+ return new TakeIterator(this, count);
+ }
+ /**
+ * Skips the first `count` items in this stream.
+ *
+ * @param count The number of items to skip. If a negative or undefined
+ * value is given, the entire stream is returned unaltered.
+ */
+ skip(count) {
+ if (count < 0 || count == null) {
+ return this;
+ }
+ return new lazy_iterator_SkipIterator(this, count);
+ }
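+ // For example (editor's sketch), combining the two:
+ //
+ //   const iter = iteratorFromIncrementing(0).skip(2).take(3);
+ //   console.log(await iter.toArray()); // [2, 3, 4]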
+ /**
+ * Prefetch the first `bufferSize` items in this stream.
+ *
+ * Note this prefetches Promises, but makes no guarantees about when those
+ * Promises resolve.
+ *
+ * @param bufferSize: An integer specifying the number of elements to be
+ * prefetched.
+ */
+ prefetch(bufferSize) {
+ return new lazy_iterator_PrefetchIterator(this, bufferSize);
+ }
+ // TODO(soergel): deep sharded shuffle, where supported
+ /**
+ * Randomly shuffles the elements of this stream.
+ *
+ * @param windowSize: An integer specifying the number of elements from
+ * this stream from which the new stream will sample.
+ * @param seed: (Optional.) An integer specifying the random seed that
+ * will be used to create the distribution.
+ */
+ shuffle(windowSize, seed) {
+ return new lazy_iterator_ShuffleIterator(this, windowSize, seed);
+ }
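+ // For example (editor's sketch): shuffle(1024, 'seed') draws each element
+ // uniformly at random from a sliding window of up to 1024 pending items,
+ // so mixing quality improves with the window size.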
+ /**
+ * Force an iterator to execute serially: each next() call will await the
+ * prior one, so that they cannot execute concurrently.
+ */
+ serial() {
+ return new SerialIterator(this);
+ }
+}
+// ============================================================================
+// The following private classes serve to implement the chainable methods
+// on LazyIterator. Unfortunately they can't be placed in separate files,
+// due to resulting trouble with circular imports.
+// ============================================================================
+// Iterators that just extend LazyIterator directly
+// ============================================================================
+class lazy_iterator_ArrayIterator extends lazy_iterator_LazyIterator {
+ constructor(items) {
+ super();
+ this.items = items;
+ this.trav = 0;
+ }
+ summary() {
+ return `Array of ${this.items.length} items`;
+ }
+ async next() {
+ if (this.trav >= this.items.length) {
+ return { value: null, done: true };
+ }
+ const item = this.items[this.trav];
+ this.trav++;
+ return { value: deepClone(item), done: false };
+ }
+}
+class FunctionCallIterator extends lazy_iterator_LazyIterator {
+ constructor(nextFn) {
+ super();
+ this.nextFn = nextFn;
+ }
+ summary() {
+ return `Function call`;
+ }
+ async next() {
+ try {
+ return this.nextFn();
+ }
+ catch (e) {
+ // Modify the error message but leave the stack trace intact
+ e.message =
+ `Error thrown while iterating through a dataset: ${e.message}`;
+ throw e;
+ }
+ }
+}
+class SerialIterator extends lazy_iterator_LazyIterator {
+ constructor(upstream) {
+ super();
+ this.upstream = upstream;
+ this.lastRead = Promise.resolve({ value: null, done: false });
+ }
+ summary() {
+ return `${this.upstream.summary()} -> Serial`;
+ }
+ async next() {
+ // This sets this.lastRead to a new Promise right away, as opposed to
+ // saying `await this.lastRead; this.lastRead = this.serialNext();` which
+ // would not work because this.lastRead would be updated only after the
+ // promise resolves.
+ this.lastRead = this.lastRead.then(() => this.serialNext());
+ return this.lastRead;
+ }
+ async serialNext() {
+ return this.upstream.next();
+ }
+}
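+// Editor's note: the `lastRead` chaining above is the serialization idiom
+// used throughout this file. Each next() synchronously replaces
+// this.lastRead with a new Promise that starts its work only after the
+// previous read settles, so concurrent next() calls resolve in call order.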
+class lazy_iterator_SkipIterator extends lazy_iterator_LazyIterator {
+ constructor(upstream, maxCount) {
+ super();
+ this.upstream = upstream;
+ this.maxCount = maxCount;
+ // Local state that should not be clobbered by out-of-order execution.
+ this.count = 0;
+ this.lastRead = Promise.resolve({ value: null, done: false });
+ }
+ summary() {
+ return `${this.upstream.summary()} -> Skip`;
+ }
+ async next() {
+ // This sets this.lastRead to a new Promise right away, as opposed to
+ // saying `await this.lastRead; this.lastRead = this.serialNext();` which
+ // would not work because this.lastRead would be updated only after the
+ // promise resolves.
+ this.lastRead = this.lastRead.then(() => this.serialNext());
+ return this.lastRead;
+ }
+ async serialNext() {
+ // TODO(soergel): consider tradeoffs of reading in parallel, eg.
+ // collecting next() promises in an Array and then waiting for
+ // Promise.all() of those. Benefit: pseudo-parallel execution. Drawback:
+ // maybe delayed GC.
+ while (this.count++ < this.maxCount) {
+ const skipped = await this.upstream.next();
+ // short-circuit if upstream is already empty
+ if (skipped.done) {
+ return skipped;
+ }
+ dist["dispose"](skipped.value);
+ }
+ return this.upstream.next();
+ }
+}
+class TakeIterator extends lazy_iterator_LazyIterator {
+ constructor(upstream, maxCount) {
+ super();
+ this.upstream = upstream;
+ this.maxCount = maxCount;
+ this.count = 0;
+ }
+ summary() {
+ return `${this.upstream.summary()} -> Take`;
+ }
+ async next() {
+ if (this.count++ >= this.maxCount) {
+ return { value: null, done: true };
+ }
+ return this.upstream.next();
+ }
+}
+// Note this batch just groups items into row-wise element arrays.
+// Rotating these to a column-wise representation happens only at the dataset
+// level.
+class RowMajorBatchIterator extends lazy_iterator_LazyIterator {
+ constructor(upstream, batchSize, enableSmallLastBatch = true) {
+ super();
+ this.upstream = upstream;
+ this.batchSize = batchSize;
+ this.enableSmallLastBatch = enableSmallLastBatch;
+ this.lastRead = Promise.resolve({ value: null, done: false });
+ }
+ summary() {
+ return `${this.upstream.summary()} -> RowMajorBatch`;
+ }
+ async next() {
+ // This sets this.lastRead to a new Promise right away, as opposed to
+ // saying `await this.lastRead; this.lastRead = this.serialNext();` which
+ // would not work because this.lastRead would be updated only after the
+ // promise resolves.
+ this.lastRead = this.lastRead.then(() => this.serialNext());
+ return this.lastRead;
+ }
+ async serialNext() {
+ const batch = [];
+ while (batch.length < this.batchSize) {
+ const item = await this.upstream.next();
+ if (item.done) {
+ if (this.enableSmallLastBatch && batch.length > 0) {
+ return { value: batch, done: false };
+ }
+ return { value: null, done: true };
+ }
+ batch.push(item.value);
+ }
+ return { value: batch, done: false };
+ }
+}
+class lazy_iterator_FilterIterator extends lazy_iterator_LazyIterator {
+ constructor(upstream, predicate) {
+ super();
+ this.upstream = upstream;
+ this.predicate = predicate;
+ this.lastRead = Promise.resolve({ value: null, done: false });
+ }
+ summary() {
+ return `${this.upstream.summary()} -> Filter`;
+ }
+ async next() {
+ // This sets this.lastRead to a new Promise right away, as opposed to
+ // saying `await this.lastRead; this.lastRead = this.serialNext();` which
+ // would not work because this.lastRead would be updated only after the
+ // promise resolves.
+ this.lastRead = this.lastRead.then(() => this.serialNext());
+ return this.lastRead;
+ }
+ async serialNext() {
+ while (true) {
+ const item = await this.upstream.next();
+ if (item.done || this.predicate(item.value)) {
+ return item;
+ }
+ dist["dispose"](item.value);
+ }
+ }
+}
+class lazy_iterator_MapIterator extends lazy_iterator_LazyIterator {
+ constructor(upstream, transform) {
+ super();
+ this.upstream = upstream;
+ this.transform = transform;
+ }
+ summary() {
+ return `${this.upstream.summary()} -> Map`;
+ }
+ async next() {
+ const item = await this.upstream.next();
+ if (item.done) {
+ return { value: null, done: true };
+ }
+ const inputTensors = dist["tensor_util"].getTensorsInContainer(item.value);
+ // Careful: the transform may mutate the item in place.
+ // That's why we have to remember the input Tensors above, and then
+ // below dispose only those that were not passed through to the output.
+ // Note too that the transform function is responsible for tidying
+ // any intermediate Tensors. Here we are concerned only about the
+ // inputs.
+ const mapped = this.transform(item.value);
+ const outputTensors = dist["tensor_util"].getTensorsInContainer(mapped);
+ // TODO(soergel) faster intersection
+ // TODO(soergel) move to tf.disposeExcept(in, out)?
+ for (const t of inputTensors) {
+ if (!dist["tensor_util"].isTensorInList(t, outputTensors)) {
+ t.dispose();
+ }
+ }
+ return { value: mapped, done: false };
+ }
+}
+class ErrorHandlingLazyIterator extends lazy_iterator_LazyIterator {
+ constructor(upstream, handler) {
+ super();
+ this.upstream = upstream;
+ this.handler = handler;
+ this.count = 0;
+ this.lastRead = Promise.resolve({ value: null, done: false });
+ }
+ summary() {
+ return `${this.upstream.summary()} -> handleErrors`;
+ }
+ async next() {
+ // This sets this.lastRead to a new Promise right away, as opposed to
+ // saying `await this.lastRead; this.lastRead = this.serialNext();` which
+ // would not work because this.lastRead would be updated only after the
+ // promise resolves.
+ this.lastRead = this.lastRead.then(() => this.serialNext());
+ return this.lastRead;
+ }
+ async serialNext() {
+ while (true) {
+ try {
+ return await this.upstream.next();
+ }
+ catch (e) {
+ if (!this.handler(e)) {
+ return { value: null, done: true };
+ }
+ // If the handler returns true, loop and fetch the next upstream item.
+ // If the upstream iterator throws an endless stream of errors, and if
+ // the handler says to ignore them, then we loop forever here. That is
+ // the correct behavior; it's up to the handler to decide when to stop.
+ }
+ }
+ }
+}
+class lazy_iterator_AsyncMapIterator extends lazy_iterator_LazyIterator {
+ constructor(upstream, transform) {
+ super();
+ this.upstream = upstream;
+ this.transform = transform;
+ }
+ summary() {
+ return `${this.upstream.summary()} -> AsyncMap`;
+ }
+ async next() {
+ const item = await this.upstream.next();
+ if (item.done) {
+ return { value: null, done: true };
+ }
+ const inputTensors = dist["tensor_util"].getTensorsInContainer(item.value);
+ // Careful: the transform may mutate the item in place.
+ // That's why we have to remember the input Tensors above, and then
+ // below dispose only those that were not passed through to the output.
+ // Note too that the transform function is responsible for tidying
+ // any intermediate Tensors. Here we are concerned only about the
+ // inputs.
+ const mapped = await this.transform(item.value);
+ const outputTensors = dist["tensor_util"].getTensorsInContainer(mapped);
+ // TODO(soergel) faster intersection
+ // TODO(soergel) move to tf.disposeExcept(in, out)?
+ for (const t of inputTensors) {
+ if (!dist["tensor_util"].isTensorInList(t, outputTensors)) {
+ t.dispose();
+ }
+ }
+ return { value: mapped, done: false };
+ }
+}
+// Iterators that maintain a queue of pending items
+// ============================================================================
+/**
+ * A base class for transforming streams that operate by maintaining an
+ * output queue of elements that are ready to return via next(). This is
+ * commonly required when the transformation is 1-to-many: A call to next()
+ * may trigger a call to the underlying stream, which will produce many
+ // mapped elements of this stream, of which we need to return only one, so
+ * we have to queue the rest.
+ */
+class lazy_iterator_OneToManyIterator extends lazy_iterator_LazyIterator {
+ constructor() {
+ super();
+ this.outputQueue = new growing_ring_buffer_GrowingRingBuffer();
+ this.lastRead = Promise.resolve({ value: null, done: false });
+ }
+ async next() {
+ // This sets this.lastRead to a new Promise right away, as opposed to
+ // saying `await this.lastRead; this.lastRead = this.serialNext();` which
+ // would not work because this.lastRead would be updated only after the
+ // promise resolves.
+ this.lastRead = this.lastRead.then(() => this.serialNext());
+ return this.lastRead;
+ }
+ async serialNext() {
+ // Fetch so that the queue contains at least one item if possible.
+ // If the upstream source is exhausted, AND there are no items left in
+ // the output queue, then this stream is also exhausted.
+ while (this.outputQueue.length() === 0) {
+ // TODO(soergel): consider parallel reads.
+ if (!await this.pump()) {
+ return { value: null, done: true };
+ }
+ }
+ return { value: this.outputQueue.shift(), done: false };
+ }
+}
+class lazy_iterator_FlatmapIterator extends lazy_iterator_OneToManyIterator {
+ constructor(upstream, transform) {
+ super();
+ this.upstream = upstream;
+ this.transform = transform;
+ }
+ summary() {
+ return `${this.upstream.summary()} -> Flatmap`;
+ }
+ async pump() {
+ const item = await this.upstream.next();
+ if (item.done) {
+ return false;
+ }
+ const inputTensors = dist["tensor_util"].getTensorsInContainer(item.value);
+ // Careful: the transform may mutate the item in place.
+ // That's why we have to remember the input Tensors above, and then
+ // below dispose only those that were not passed through to the output.
+ // Note too that the transform function is responsible for tidying any
+ // intermediate Tensors. Here we are concerned only about the inputs.
+ const mappedArray = this.transform(item.value);
+ const outputTensors = dist["tensor_util"].getTensorsInContainer(mappedArray);
+ this.outputQueue.pushAll(mappedArray);
+ // TODO(soergel) faster intersection, and deduplicate outputTensors
+ // TODO(soergel) move to tf.disposeExcept(in, out)?
+ for (const t of inputTensors) {
+ if (!dist["tensor_util"].isTensorInList(t, outputTensors)) {
+ t.dispose();
+ }
+ }
+ return true;
+ }
+}
+/**
+ * Provides a `LazyIterator` that concatenates a stream of underlying
+ * streams.
+ *
+ * Doing this in a concurrency-safe way requires some trickery. In
+ * particular, we want this stream to return the elements from the
+ * underlying streams in the correct order according to when next() was
+ * called, even if the resulting Promises resolve in a different order.
+ */
+class ChainedIterator extends lazy_iterator_LazyIterator {
+ constructor(iterators, baseErrorHandler) {
+ super();
+ this.baseErrorHandler = baseErrorHandler;
+ // Strict Promise execution order:
+ // a next() call may not even begin until the previous one completes.
+ this.lastRead = null;
+ // Local state that should not be clobbered by out-of-order execution.
+ this.iterator = null;
+ this.moreIterators = iterators;
+ }
+ summary() {
+ const upstreamSummaries = 'TODO: fill in upstream of chained summaries';
+ return `${upstreamSummaries} -> Chained`;
+ }
+ async next() {
+ this.lastRead = this.readFromChain(this.lastRead);
+ return this.lastRead;
+ }
+ async readFromChain(lastRead) {
+ // Must await the previous read, since it may have advanced the stream of
+ // streams, from which we need to read.
+ // This is unfortunate because we can't parallelize reads, which means
+ // prefetching of chained streams is a no-op.
+ // One solution is to prefetch immediately upstream of this.
+ await lastRead;
+ if (this.iterator == null) {
+ const iteratorResult = await this.moreIterators.next();
+ if (iteratorResult.done) {
+ // No more streams to stream from.
+ return { value: null, done: true };
+ }
+ this.iterator = iteratorResult.value;
+ if (this.baseErrorHandler != null) {
+ this.iterator = this.iterator.handleErrors(this.baseErrorHandler);
+ }
+ }
+ const itemResult = await this.iterator.next();
+ if (itemResult.done) {
+ this.iterator = null;
+ return this.readFromChain(lastRead);
+ }
+ return itemResult;
+ }
+}
+var ZipMismatchMode;
+(function (ZipMismatchMode) {
+ ZipMismatchMode[ZipMismatchMode["FAIL"] = 0] = "FAIL";
+ ZipMismatchMode[ZipMismatchMode["SHORTEST"] = 1] = "SHORTEST";
+ ZipMismatchMode[ZipMismatchMode["LONGEST"] = 2] = "LONGEST"; // use nulls for exhausted streams; use up the longest stream.
+})(ZipMismatchMode || (ZipMismatchMode = {}));
+/**
+ * Provides a `LazyIterator` that zips together an array, dict, or nested
+ * structure of `LazyIterator`s (and perhaps additional constants).
+ *
+ * The underlying streams must provide elements in a consistent order such
+ * that they correspond.
+ *
+ * Typically, the underlying streams should have the same number of
+ * elements. If they do not, the behavior is determined by the
+ * `mismatchMode` argument.
+ *
+ * The nested structure of the `iterators` argument determines the
+ * structure of elements in the resulting iterator.
+ *
+ * Doing this in a concurrency-safe way requires some trickery. In
+ * particular, we want this stream to return the elements from the
+ * underlying streams in the correct order according to when next() was
+ * called, even if the resulting Promises resolve in a different order.
+ *
+ * @param iterators: An array or object containing LazyIterators at the
+ * leaves.
+ * @param mismatchMode: Determines what to do when one underlying iterator
+ * is exhausted before the others. `ZipMismatchMode.FAIL` (the default)
+ * causes an error to be thrown in this case. `ZipMismatchMode.SHORTEST`
+ * causes the zipped iterator to terminate when the first underlying
+ * stream is exhausted, so elements remaining on the longer streams are
+ * ignored.
+ * `ZipMismatchMode.LONGEST` causes the zipped stream to continue, filling
+ * in nulls for the exhausted streams, until all streams are exhausted.
+ */
+class lazy_iterator_ZipIterator extends lazy_iterator_LazyIterator {
+ constructor(iterators, mismatchMode = ZipMismatchMode.FAIL) {
+ super();
+ this.iterators = iterators;
+ this.mismatchMode = mismatchMode;
+ this.count = 0;
+ this.currentPromise = null;
+ }
+ summary() {
+ const upstreamSummaries = 'TODO: fill in upstream of zip summaries';
+ return `{${upstreamSummaries}} -> Zip`;
+ }
+ async nextState(afterState) {
+ // This chaining ensures that the underlying next() calls are not even
+ // made before the previous ones have resolved.
+ await afterState;
+ // Collect underlying iterator "done" signals as a side effect in
+ // getNext()
+ let numIterators = 0;
+ let iteratorsDone = 0;
+ function getNext(container) {
+ if (container instanceof lazy_iterator_LazyIterator) {
+ const result = container.next();
+ return {
+ value: result.then(x => {
+ numIterators++;
+ if (x.done) {
+ iteratorsDone++;
+ }
+ return x.value;
+ }),
+ recurse: false
+ };
+ }
+ else {
+ return { value: null, recurse: true };
+ }
+ }
+ const mapped = await Object(deep_map["c" /* deepMapAndAwaitAll */])(this.iterators, getNext);
+ if (numIterators === iteratorsDone) {
+ // The streams have all ended.
+ return { value: null, done: true };
+ }
+ if (iteratorsDone > 0) {
+ switch (this.mismatchMode) {
+ case ZipMismatchMode.FAIL:
+ throw new Error('Zipped streams should have the same length. ' +
+ `Mismatched at element ${this.count}.`);
+ case ZipMismatchMode.SHORTEST:
+ return { value: null, done: true };
+ case ZipMismatchMode.LONGEST:
+ default:
+ // Continue. The exhausted streams already produced value: null.
+ }
+ }
+ this.count++;
+ return { value: mapped, done: false };
+ }
+ async next() {
+ this.currentPromise = this.nextState(this.currentPromise);
+ return this.currentPromise;
+ }
+}
+// Iterators that maintain a ring buffer of pending promises
+// ============================================================================
+/**
+ * A stream that prefetches a given number of items from an upstream source,
+ * returning them in FIFO order.
+ *
+ * Note this prefetches Promises, but makes no guarantees about when those
+ * Promises resolve.
+ */
+class lazy_iterator_PrefetchIterator extends lazy_iterator_LazyIterator {
+ constructor(upstream, bufferSize) {
+ super();
+ this.upstream = upstream;
+ this.bufferSize = bufferSize;
+ this.buffer = new RingBuffer(bufferSize);
+ }
+ summary() {
+ return `${this.upstream.summary()} -> Prefetch`;
+ }
+ /**
+ * Refill the prefetch buffer. Returns only after the buffer is full, or
+ * the upstream source is exhausted.
+ */
+ refill() {
+ while (!this.buffer.isFull()) {
+ const v = this.upstream.next();
+ this.buffer.push(v);
+ }
+ }
+ next() {
+ this.refill();
+ // This shift will never throw an error because the buffer is always
+ // full after a refill. If the stream is exhausted, the buffer will be
+ // full of Promises that will resolve to the end-of-stream signal.
+ return this.buffer.shift();
+ }
+}
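+// For example (editor's sketch): upstream.prefetch(3) eagerly issues up to
+// three upstream.next() calls, holds the resulting Promises in a
+// RingBuffer, and serves each next() from the oldest pending Promise.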
+/**
+ * A stream that performs a sliding-window random shuffle on an upstream
+ * source. This is like a `PrefetchIterator` except that the items are
+ * returned in randomized order. Mixing naturally improves as the buffer
+ * size increases.
+ */
+class lazy_iterator_ShuffleIterator extends lazy_iterator_PrefetchIterator {
+ constructor(upstream, windowSize, seed) {
+ super(upstream, windowSize);
+ this.upstream = upstream;
+ this.windowSize = windowSize;
+ // Local state that should not be clobbered by out-of-order execution.
+ this.upstreamExhausted = false;
+ this.random = seedrandom["alea"](seed || dist["util"].now().toString());
+ this.lastRead = Promise.resolve({ value: null, done: false });
+ }
+ async next() {
+ // This sets this.lastRead to a new Promise right away, as opposed to
+ // saying `await this.lastRead; this.lastRead = this.serialNext();` which
+ // would not work because this.lastRead would be updated only after the
+ // promise resolves.
+ this.lastRead = this.lastRead.then(() => this.serialNext());
+ return this.lastRead;
+ }
+ randomInt(max) {
+ return Math.floor(this.random() * max);
+ }
+ chooseIndex() {
+ return this.randomInt(this.buffer.length());
+ }
+ async serialNext() {
+ // TODO(soergel): consider performance
+ if (!this.upstreamExhausted) {
+ this.refill();
+ }
+ while (!this.buffer.isEmpty()) {
+ const chosenIndex = this.chooseIndex();
+ const result = await this.buffer.shuffleExcise(chosenIndex);
+ if (result.done) {
+ this.upstreamExhausted = true;
+ }
+ else {
+ this.refill();
+ return result;
+ }
+ }
+ return { value: null, done: true };
+ }
+}
+//# sourceMappingURL=lazy_iterator.js.map
+
+/***/ }),
+/* 15 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* WEBPACK VAR INJECTION */(function(Buffer) {/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return OperationMapper; });
+/* unused harmony export decodeBase64 */
+/* unused harmony export parseStringParam */
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "i", function() { return getStringParam; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return getBoolParam; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "f", function() { return getNumberParam; });
+/* unused harmony export parseDtypeParam */
+/* unused harmony export getFuncParam */
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "e", function() { return getDtypeParam; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "d", function() { return getDtypeArrayParam; });
+/* unused harmony export parseTensorShapeParam */
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "k", function() { return getTensorShapeParam; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "g", function() { return getNumericArrayParam; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "h", function() { return getStringArrayParam; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "j", function() { return getTensorShapeArrayParam; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return getBoolArrayParam; });
+/* harmony import */ var _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(0);
+/* harmony import */ var _data_compiled_api__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(21);
+/* harmony import */ var _custom_op_register__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(24);
+/* harmony import */ var _executors_utils__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(2);
+/* harmony import */ var _op_list_arithmetic__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(41);
+/* harmony import */ var _op_list_basic_math__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(42);
+/* harmony import */ var _op_list_control__WEBPACK_IMPORTED_MODULE_6__ = __webpack_require__(43);
+/* harmony import */ var _op_list_convolution__WEBPACK_IMPORTED_MODULE_7__ = __webpack_require__(44);
+/* harmony import */ var _op_list_creation__WEBPACK_IMPORTED_MODULE_8__ = __webpack_require__(45);
+/* harmony import */ var _op_list_dynamic__WEBPACK_IMPORTED_MODULE_9__ = __webpack_require__(46);
+/* harmony import */ var _op_list_evaluation__WEBPACK_IMPORTED_MODULE_10__ = __webpack_require__(47);
+/* harmony import */ var _op_list_graph__WEBPACK_IMPORTED_MODULE_11__ = __webpack_require__(48);
+/* harmony import */ var _op_list_image__WEBPACK_IMPORTED_MODULE_12__ = __webpack_require__(49);
+/* harmony import */ var _op_list_logical__WEBPACK_IMPORTED_MODULE_13__ = __webpack_require__(50);
+/* harmony import */ var _op_list_matrices__WEBPACK_IMPORTED_MODULE_14__ = __webpack_require__(51);
+/* harmony import */ var _op_list_normalization__WEBPACK_IMPORTED_MODULE_15__ = __webpack_require__(52);
+/* harmony import */ var _op_list_reduction__WEBPACK_IMPORTED_MODULE_16__ = __webpack_require__(53);
+/* harmony import */ var _op_list_slice_join__WEBPACK_IMPORTED_MODULE_17__ = __webpack_require__(54);
+/* harmony import */ var _op_list_spectral__WEBPACK_IMPORTED_MODULE_18__ = __webpack_require__(55);
+/* harmony import */ var _op_list_transformation__WEBPACK_IMPORTED_MODULE_19__ = __webpack_require__(56);
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+class OperationMapper {
+ // Singleton instance for the mapper
+ static get Instance() {
+ return this._instance || (this._instance = new this());
+ }
+ // Loads the op mapping from the JSON file.
+ constructor() {
+ const ops = [
+ _op_list_arithmetic__WEBPACK_IMPORTED_MODULE_4__, _op_list_basic_math__WEBPACK_IMPORTED_MODULE_5__, _op_list_control__WEBPACK_IMPORTED_MODULE_6__, _op_list_convolution__WEBPACK_IMPORTED_MODULE_7__, _op_list_creation__WEBPACK_IMPORTED_MODULE_8__, _op_list_dynamic__WEBPACK_IMPORTED_MODULE_9__,
+ _op_list_evaluation__WEBPACK_IMPORTED_MODULE_10__, _op_list_logical__WEBPACK_IMPORTED_MODULE_13__, _op_list_image__WEBPACK_IMPORTED_MODULE_12__, _op_list_graph__WEBPACK_IMPORTED_MODULE_11__, _op_list_matrices__WEBPACK_IMPORTED_MODULE_14__, _op_list_normalization__WEBPACK_IMPORTED_MODULE_15__, _op_list_reduction__WEBPACK_IMPORTED_MODULE_16__,
+ _op_list_slice_join__WEBPACK_IMPORTED_MODULE_17__, _op_list_spectral__WEBPACK_IMPORTED_MODULE_18__, _op_list_transformation__WEBPACK_IMPORTED_MODULE_19__
+ ];
+ const mappersJson = [].concat(...ops.map(op => op.json));
+ this.opMappers = mappersJson.reduce((map, mapper) => {
+ map[mapper.tfOpName] = mapper;
+ return map;
+ }, {});
+ }
+ // Converts the model from the TensorFlow GraphDef to the local
+ // representation for the TensorFlow.js API.
+ transformGraph(graph, signature = {}) {
+ const tfNodes = graph.node;
+ const placeholders = [];
+ const weights = [];
+ const nodes = tfNodes.reduce((map, node) => {
+ map[node.name] = this.mapNode(node);
+ if (node.op.startsWith('Placeholder')) {
+ placeholders.push(map[node.name]);
+ }
+ if (node.op === 'Const') {
+ weights.push(map[node.name]);
+ }
+ return map;
+ }, {});
+ let inputs = [];
+ const outputs = [];
+ let inputNodeNameToKey = {};
+ let outputNodeNameToKey = {};
+ if (signature != null) {
+ inputNodeNameToKey = this.mapSignatureEntries(signature.inputs);
+ outputNodeNameToKey = this.mapSignatureEntries(signature.outputs);
+ }
+ const allNodes = Object.keys(nodes);
+ allNodes.forEach(key => {
+ const node = nodes[key];
+ node.inputNames.forEach(name => {
+ const [nodeName,] = Object(_executors_utils__WEBPACK_IMPORTED_MODULE_3__[/* getNodeNameAndIndex */ "a"])(name);
+ node.inputs.push(nodes[nodeName]);
+ nodes[nodeName].children.push(node);
+ });
+ });
+ // If the signature has no outputs set, add any node that has no
+ // children (i.e., is consumed by no other node) as an output.
+ if (Object.keys(outputNodeNameToKey).length === 0) {
+ allNodes.forEach(key => {
+ const node = nodes[key];
+ if (node.children.length === 0) {
+ outputs.push(node);
+ }
+ });
+ }
+ else {
+ Object.keys(outputNodeNameToKey).forEach(name => {
+ const [nodeName,] = Object(_executors_utils__WEBPACK_IMPORTED_MODULE_3__[/* getNodeNameAndIndex */ "a"])(name);
+ const node = nodes[nodeName];
+ if (node != null) {
+ node.signatureKey = outputNodeNameToKey[name];
+ outputs.push(node);
+ }
+ });
+ }
+ if (Object.keys(inputNodeNameToKey).length > 0) {
+ Object.keys(inputNodeNameToKey).forEach(name => {
+ const [nodeName,] = Object(_executors_utils__WEBPACK_IMPORTED_MODULE_3__[/* getNodeNameAndIndex */ "a"])(name);
+ const node = nodes[nodeName];
+ if (node) {
+ node.signatureKey = inputNodeNameToKey[name];
+ inputs.push(node);
+ }
+ });
+ }
+ else {
+ inputs = placeholders;
+ }
+ let functions = {};
+ if (graph.library != null && graph.library.function != null) {
+ functions = graph.library.function.reduce((functions, func) => {
+ functions[func.signature.name] = this.mapFunction(func);
+ return functions;
+ }, {});
+ }
+ return {
+ nodes,
+ inputs,
+ outputs,
+ weights,
+ placeholders,
+ signature,
+ functions
+ };
+ }
+ mapSignatureEntries(entries) {
+ return Object.keys(entries || {})
+ .reduce((prev, curr) => {
+ prev[entries[curr].name] = curr;
+ return prev;
+ }, {});
+ }
+ mapNode(node) {
+ // Unsupported ops will cause an error at run-time (not parse time), since
+ // they may not be used by the actual execution subgraph.
+ const mapper = Object(_custom_op_register__WEBPACK_IMPORTED_MODULE_2__[/* getRegisteredOp */ "b"])(node.op) || this.opMappers[node.op] || {};
+ if (node.attr == null) {
+ node.attr = {};
+ }
+ const newNode = {
+ name: node.name,
+ op: node.op,
+ category: mapper.category,
+ inputNames: (node.input ||
+ []).map(input => input.startsWith('^') ? input.substr(1) : input),
+ inputs: [],
+ children: [],
+ inputParams: {},
+ attrParams: {},
+ rawAttrs: node.attr
+ };
+ if (mapper.inputs != null) {
+ newNode.inputParams =
+ mapper.inputs.reduce((map, param) => {
+ map[param.name] = {
+ type: param.type,
+ inputIndexStart: param.start,
+ inputIndexEnd: param.end
+ };
+ return map;
+ }, {});
+ }
+ if (mapper.attrs != null) {
+ newNode.attrParams =
+ mapper.attrs.reduce((map, param) => {
+ const type = param.type;
+ let value = undefined;
+ switch (param.type) {
+ case 'string':
+ value = getStringParam(node.attr, param.tfName, param.defaultValue);
+ if (value === undefined && !!param.tfDeprecatedName) {
+ value = getStringParam(node.attr, param.tfDeprecatedName, param.defaultValue);
+ }
+ break;
+ case 'string[]':
+ value = getStringArrayParam(node.attr, param.tfName, param.defaultValue);
+ if (value === undefined && !!param.tfDeprecatedName) {
+ value = getStringArrayParam(node.attr, param.tfDeprecatedName, param.defaultValue);
+ }
+ break;
+ case 'number':
+ value = getNumberParam(node.attr, param.tfName, (param.defaultValue || 0));
+ if (value === undefined && !!param.tfDeprecatedName) {
+ value = getNumberParam(node.attr, param.tfDeprecatedName, param.defaultValue);
+ }
+ break;
+ case 'number[]':
+ value = getNumericArrayParam(node.attr, param.tfName, param.defaultValue);
+ if (value === undefined && !!param.tfDeprecatedName) {
+ value = getNumericArrayParam(node.attr, param.tfDeprecatedName, param.defaultValue);
+ }
+ break;
+ case 'bool':
+ value = getBoolParam(node.attr, param.tfName, param.defaultValue);
+ if (value === undefined && !!param.tfDeprecatedName) {
+ value = getBoolParam(node.attr, param.tfDeprecatedName, param.defaultValue);
+ }
+ break;
+ case 'bool[]':
+ value = getBoolArrayParam(node.attr, param.tfName, param.defaultValue);
+ if (value === undefined && !!param.tfDeprecatedName) {
+ value = getBoolArrayParam(node.attr, param.tfDeprecatedName, param.defaultValue);
+ }
+ break;
+ case 'shape':
+ value = getTensorShapeParam(node.attr, param.tfName, param.defaultValue);
+ if (value === undefined && !!param.tfDeprecatedName) {
+ value = getTensorShapeParam(node.attr, param.tfDeprecatedName, param.defaultValue);
+ }
+ break;
+ case 'shape[]':
+ value = getTensorShapeArrayParam(node.attr, param.tfName, param.defaultValue);
+ if (value === undefined && !!param.tfDeprecatedName) {
+ value = getTensorShapeArrayParam(node.attr, param.tfDeprecatedName, param.defaultValue);
+ }
+ break;
+ case 'dtype':
+ value = getDtypeParam(node.attr, param.tfName, param.defaultValue);
+ if (value === undefined && !!param.tfDeprecatedName) {
+ value = getDtypeParam(node.attr, param.tfDeprecatedName, param.defaultValue);
+ }
+ break;
+ case 'dtype[]':
+ value = getDtypeArrayParam(node.attr, param.tfName, param.defaultValue);
+ if (value === undefined && !!param.tfDeprecatedName) {
+ value = getDtypeArrayParam(node.attr, param.tfDeprecatedName, param.defaultValue);
+ }
+ break;
+ case 'func':
+ value = getFuncParam(node.attr, param.tfName, param.defaultValue);
+ if (value === undefined && !!param.tfDeprecatedName) {
+ value = getFuncParam(node.attr, param.tfDeprecatedName, param.defaultValue);
+ }
+ break;
+ case 'tensor':
+ case 'tensors':
+ break;
+ default:
+ throw new Error(`Unsupported param type: ${param.type} for op: ${node.op}`);
+ }
+ map[param.name] = { value, type };
+ return map;
+ }, {});
+ }
+ return newNode;
+ }
+ // Maps the TensorFlow FunctionDef to the TFJS graph object.
+ mapFunction(functionDef) {
+ const tfNodes = functionDef.nodeDef;
+ const placeholders = [];
+ const weights = [];
+ let nodes = {};
+ if (tfNodes != null) {
+ nodes = tfNodes.reduce((map, node) => {
+ map[node.name] = this.mapNode(node);
+ if (node.op === 'Const') {
+ weights.push(map[node.name]);
+ }
+ return map;
+ }, {});
+ }
+ const inputs = [];
+ const outputs = [];
+ functionDef.signature.inputArg.forEach(arg => {
+ const [nodeName,] = Object(_executors_utils__WEBPACK_IMPORTED_MODULE_3__[/* getNodeNameAndIndex */ "a"])(arg.name);
+ const node = {
+ name: nodeName,
+ op: 'Placeholder',
+ inputs: [],
+ inputNames: [],
+ category: 'graph',
+ inputParams: {},
+ attrParams: { dtype: { value: parseDtypeParam(arg.type), type: 'dtype' } },
+ children: []
+ };
+ node.signatureKey = arg.name;
+ inputs.push(node);
+ nodes[nodeName] = node;
+ });
+ const allNodes = Object.keys(nodes);
+ allNodes.forEach(key => {
+ const node = nodes[key];
+ node.inputNames.forEach(name => {
+ const [nodeName,] = Object(_executors_utils__WEBPACK_IMPORTED_MODULE_3__[/* getNodeNameAndIndex */ "a"])(name);
+ node.inputs.push(nodes[nodeName]);
+ nodes[nodeName].children.push(node);
+ });
+ });
+ const returnNodeMap = functionDef.ret;
+ functionDef.signature.outputArg.forEach(output => {
+ const [nodeName, index] = Object(_executors_utils__WEBPACK_IMPORTED_MODULE_3__[/* getNodeNameAndIndex */ "a"])(returnNodeMap[output.name]);
+ const node = nodes[nodeName];
+ if (node != null) {
+ node.defaultOutput = index;
+ outputs.push(node);
+ }
+ });
+ const signature = this.mapArgsToSignature(functionDef);
+ return { nodes, inputs, outputs, weights, placeholders, signature };
+ }
+ mapArgsToSignature(functionDef) {
+ return {
+ methodName: functionDef.signature.name,
+ inputs: functionDef.signature.inputArg.reduce((map, arg) => {
+ map[arg.name] = this.mapArgToTensorInfo(arg);
+ return map;
+ }, {}),
+ outputs: functionDef.signature.outputArg.reduce((map, arg) => {
+ map[arg.name] = this.mapArgToTensorInfo(arg, functionDef.ret);
+ return map;
+ }, {}),
+ };
+ }
+ mapArgToTensorInfo(arg, nameMap) {
+ let name = arg.name;
+ if (nameMap != null) {
+ name = nameMap[name];
+ }
+ return { name, dtype: arg.type };
+ }
+}
+function decodeBase64(text) {
+ const global = Object(_tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["env"])().global;
+ if (typeof global.atob !== 'undefined') {
+ return global.atob(text);
+ }
+ else if (typeof Buffer !== 'undefined') {
+ // Buffer.from avoids the deprecated Buffer constructor.
+ return Buffer.from(text, 'base64').toString();
+ }
+ else {
+ throw new Error('Unable to decode base64 in this environment. ' +
+ 'Missing built-in atob() or Buffer()');
+ }
+}
+function parseStringParam(s, keepCase) {
+ const value = Array.isArray(s) ? String.fromCharCode.apply(null, s) : decodeBase64(s);
+ return keepCase ? value : value.toLowerCase();
+}
+function getStringParam(attrs, name, def, keepCase = false) {
+ const param = attrs[name];
+ if (param != null) {
+ return parseStringParam(param.s, keepCase);
+ }
+ return def;
+}
+function getBoolParam(attrs, name, def) {
+ const param = attrs[name];
+ return param ? param.b : def;
+}
+function getNumberParam(attrs, name, def) {
+ const param = attrs[name] || {};
+ const value = param['i'] != null ? param['i'] : (param['f'] != null ? param['f'] : def);
+ return (typeof value === 'number') ? value : parseInt(value, 10);
+}
+function parseDtypeParam(value) {
+ if (typeof (value) === 'string') {
+ // tslint:disable-next-line:no-any
+ value = _data_compiled_api__WEBPACK_IMPORTED_MODULE_1__[/* DataType */ "a"][value];
+ }
+ switch (value) {
+ case _data_compiled_api__WEBPACK_IMPORTED_MODULE_1__[/* DataType */ "a"].DT_FLOAT:
+ return 'float32';
+ case _data_compiled_api__WEBPACK_IMPORTED_MODULE_1__[/* DataType */ "a"].DT_INT32:
+ case _data_compiled_api__WEBPACK_IMPORTED_MODULE_1__[/* DataType */ "a"].DT_INT64:
+ case _data_compiled_api__WEBPACK_IMPORTED_MODULE_1__[/* DataType */ "a"].DT_INT8:
+ case _data_compiled_api__WEBPACK_IMPORTED_MODULE_1__[/* DataType */ "a"].DT_UINT8:
+ return 'int32';
+ case _data_compiled_api__WEBPACK_IMPORTED_MODULE_1__[/* DataType */ "a"].DT_BOOL:
+ return 'bool';
+ case _data_compiled_api__WEBPACK_IMPORTED_MODULE_1__[/* DataType */ "a"].DT_DOUBLE:
+ return 'float32';
+ case _data_compiled_api__WEBPACK_IMPORTED_MODULE_1__[/* DataType */ "a"].DT_STRING:
+ return 'string';
+ default:
+ // Unknown dtype error will happen at runtime (instead of parse time),
+ // since these nodes might not be used by the actual subgraph execution.
+ return null;
+ }
+}
+function getFuncParam(attrs, name, def) {
+ const param = attrs[name];
+ if (param && param.func) {
+ return param.func.name;
+ }
+ return def;
+}
+function getDtypeParam(attrs, name, def) {
+ const param = attrs[name];
+ if (param && param.type) {
+ return parseDtypeParam(param.type);
+ }
+ return def;
+}
+function getDtypeArrayParam(attrs, name, def) {
+ const param = attrs[name];
+ if (param && param.list && param.list.type) {
+ return param.list.type.map(v => parseDtypeParam(v));
+ }
+ return def;
+}
+function parseTensorShapeParam(shape) {
+ if (shape.unknownRank) {
+ return undefined;
+ }
+ if (shape.dim != null) {
+ return shape.dim.map(dim => (typeof dim.size === 'number') ? dim.size : parseInt(dim.size, 10));
+ }
+ return [];
+}
+function getTensorShapeParam(attrs, name, def) {
+ const param = attrs[name];
+ if (param && param.shape) {
+ return parseTensorShapeParam(param.shape);
+ }
+ return def;
+}
+function getNumericArrayParam(attrs, name, def) {
+ const param = attrs[name];
+ if (param) {
+ return ((param.list.f && param.list.f.length ? param.list.f :
+ param.list.i) ||
+ [])
+ .map(v => (typeof v === 'number') ? v : parseInt(v, 10));
+ }
+ return def;
+}
+function getStringArrayParam(attrs, name, def, keepCase = false) {
+ const param = attrs[name];
+ if (param && param.list && param.list.s) {
+ return param.list.s.map((v) => {
+ return parseStringParam(v, keepCase);
+ });
+ }
+ return def;
+}
+function getTensorShapeArrayParam(attrs, name, def) {
+ const param = attrs[name];
+ if (param && param.list && param.list.shape) {
+ return param.list.shape.map((v) => {
+ return parseTensorShapeParam(v);
+ });
+ }
+ return def;
+}
+function getBoolArrayParam(attrs, name, def) {
+ const param = attrs[name];
+ if (param && param.list && param.list.b) {
+ return param.list.b;
+ }
+ return def;
+}
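+// Illustrative sketch of the attr helpers above, using assumed NodeDef-style
+// AttrValue shapes ({type}, {b}, {i}, {s}) rather than a real graph:
+//
+//   const attrs = {
+//     T: {type: 1},                        // DataType.DT_FLOAT
+//     keep_dims: {b: true},
+//     axis: {i: 2},
+//     data_format: {s: 'TkhXQw=='}         // base64 for 'NHWC'
+//   };
+//   getDtypeParam(attrs, 'T', 'float32');          // 'float32'
+//   getBoolParam(attrs, 'keep_dims', false);       // true
+//   getNumberParam(attrs, 'axis', 0);              // 2
+//   getStringParam(attrs, 'data_format', 'nhwc');  // 'nhwc' (lowercased)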
+//# sourceMappingURL=operation_mapper.js.map
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(39).Buffer))
+
+/***/ }),
+/* 16 */
+/***/ (function(module, exports) {
+
+module.exports = function() {
+ throw new Error("define cannot be used indirect");
+};
+
+
+/***/ }),
+/* 17 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return getKernel; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return getGradient; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return getKernelsForBackend; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "e", function() { return registerKernel; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "d", function() { return registerGradient; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "g", function() { return unregisterKernel; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "f", function() { return unregisterGradient; });
+/* harmony import */ var _global_util__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(33);
+/**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+const kernelRegistry = Object(_global_util__WEBPACK_IMPORTED_MODULE_0__[/* getGlobal */ "a"])('kernelRegistry', () => new Map());
+const gradRegistry = Object(_global_util__WEBPACK_IMPORTED_MODULE_0__[/* getGlobal */ "a"])('gradRegistry', () => new Map());
+/**
+ * Returns the kernel function (code) associated with the provided names.
+ *
+ * @param kernelName The official name of the kernel.
+ * @param backendName The official name of the backend.
+ */
+function getKernel(kernelName, backendName) {
+ const key = makeKey(kernelName, backendName);
+ return kernelRegistry.get(key);
+}
+/**
+ * Returns the registered gradient info associated with the provided kernel.
+ * @param kernelName The official TF kernel name.
+ */
+function getGradient(kernelName) {
+ return gradRegistry.get(kernelName);
+}
+function getKernelsForBackend(backendName) {
+ const it = kernelRegistry.entries();
+ const result = [];
+ while (true) {
+ const { done, value } = it.next();
+ if (done) {
+ break;
+ }
+ const [key, config] = value;
+ const [backend,] = key.split('_');
+ if (backend === backendName) {
+ result.push(config);
+ }
+ }
+ return result;
+}
+/**
+ * Registers the function (forward pass) for the kernel in a global registry.
+ *
+ * @param config A config object with the following properties:
+ * - `kernelName` The official name of the kernel.
+ * - `backendName` The official name of the backend.
+ * - `kernelFunc` The function to run during the forward pass of the kernel.
+ * - `setupFunc` Optional. Gets called once, after the backend initializes.
+ * - `disposeFunc` Optional. Gets called once, right before the backend is
+ * disposed.
+ */
+function registerKernel(config) {
+ const { kernelName, backendName } = config;
+ const key = makeKey(kernelName, backendName);
+ if (kernelRegistry.has(key)) {
+ console.warn(`The kernel '${kernelName}' for backend ` +
+ `'${backendName}' is already registered`);
+ }
+ kernelRegistry.set(key, config);
+}
+/**
+ * Registers a gradient function for a given kernel in the global registry,
+ * to be used during the back-propagation of that kernel.
+ *
+ * @param config An object with the following properties:
+ * - `kernelName` The name of the kernel that the gradient function is for.
+ * - `gradFunc` The function to run during back-propagation.
+ */
+function registerGradient(config) {
+ const { kernelName } = config;
+ if (gradRegistry.has(kernelName)) {
+ console.warn(`Overriding the gradient for '${kernelName}'`);
+ }
+ gradRegistry.set(kernelName, config);
+}
+/**
+ * Removes the kernel function from the registry.
+ *
+ * @param kernelName The official name of the kernel.
+ * @param backendName The official name of the backend.
+ *
+ */
+function unregisterKernel(kernelName, backendName) {
+ const key = makeKey(kernelName, backendName);
+ if (!kernelRegistry.has(key)) {
+ throw new Error(`The kernel '${kernelName}' for backend ` +
+ `'${backendName}' is not registered`);
+ }
+ kernelRegistry.delete(key);
+}
+/** Removes the registered gradient from the global registry. */
+function unregisterGradient(kernelName) {
+ if (!gradRegistry.has(kernelName)) {
+ throw new Error(`The gradient '${kernelName}' is not registered`);
+ }
+ gradRegistry.delete(kernelName);
+}
+function makeKey(kernelName, backendName) {
+ return `${backendName}_${kernelName}`;
+}
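+// Illustrative sketch: registry keys have the form `${backendName}_${kernelName}`,
+// which is why getKernelsForBackend() recovers the backend with
+// key.split('_')[0] (assuming backend names contain no underscores).
+//
+//   registerKernel({kernelName: 'Square', backendName: 'cpu',
+//                   kernelFunc: ({inputs}) => {/* forward pass */}});
+//   getKernel('Square', 'cpu');   // the config registered above
+//   getKernelsForBackend('cpu');  // includes that config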
+//# sourceMappingURL=kernel_registry.js.map
+
+/***/ }),
+/* 18 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return complex; });
+/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
+/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(6);
+/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(3);
+/* harmony import */ var _util__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(1);
+/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(4);
+/**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+
+
+
+/**
+ * Converts two real numbers to a complex number.
+ *
+ * Given a tensor `real` representing the real part of a complex number, and a
+ * tensor `imag` representing the imaginary part of a complex number, this
+ * operation returns complex numbers elementwise of the form [r0, i0, r1, i1],
+ * where r represents the real part and i represents the imaginary part.
+ *
+ * The input tensors real and imag must have the same shape.
+ *
+ * ```js
+ * const real = tf.tensor1d([2.25, 3.25]);
+ * const imag = tf.tensor1d([4.75, 5.75]);
+ * const complex = tf.complex(real, imag);
+ *
+ * complex.print();
+ * ```
+ */
+/** @doc {heading: 'Tensors', subheading: 'Creation'} */
+function complex_(real, imag) {
+ const $real = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(real, 'real', 'complex');
+ const $imag = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(imag, 'imag', 'complex');
+ _util__WEBPACK_IMPORTED_MODULE_3__["assertShapesMatch"]($real.shape, $imag.shape, `real and imag shapes, ${$real.shape} and ${$imag.shape}, ` +
+ `must match in call to tf.complex().`);
+ const forward = (backend) => {
+ return backend.complex($real, $imag);
+ };
+ const inputs = { real: $real, imag: $imag };
+ return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernelFunc(forward, inputs, null /* gradient */, _kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Complex */ "k"]);
+}
+const complex = Object(_operation__WEBPACK_IMPORTED_MODULE_4__[/* op */ "a"])({ complex_ });
+//# sourceMappingURL=complex.js.map
+
+/***/ }),
+/* 19 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return deepMap; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "d", function() { return deepZip; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "f", function() { return zipToList; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return deepMapAndAwaitAll; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "e", function() { return isIterable; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return canTensorify; });
+/* harmony import */ var _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(0);
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * =============================================================================
+ */
+
+/**
+ * Apply a mapping function to a nested structure in a recursive manner.
+ *
+ * The result of the mapping is an object with the same nested structure (i.e.,
+ * of arrays and dicts) as the input, except that some subtrees are replaced,
+ * according to the results of the mapping function.
+ *
+ * Mappings are memoized. Thus, if the nested structure contains the same
+ * object in multiple positions, the output will contain the same mapped object
+ * in those positions. Cycles are not supported, however.
+ *
+ * @param input: The object to which to apply the mapping function.
+ * @param mapFn: A function that expects a single node of the object tree, and
+ * returns a `DeepMapResult`. The `DeepMapResult` either provides a
+ * replacement value for that node (i.e., replacing the subtree), or indicates
+ * that the node should be processed recursively.
+ */
+function deepMap(input, mapFn) {
+ return deepMapInternal(input, mapFn);
+}
+/**
+ * @param seen: A Map of known object mappings (i.e., memoized results of
+ * `mapFn()`)
+ * @param containedIn: A set containing objects on the reference path currently
+ * being processed (used to detect cycles).
+ */
+function deepMapInternal(input, mapFn, seen = new Map(), containedIn = new Set()) {
+ if (input == null) {
+ return null;
+ }
+ if (containedIn.has(input)) {
+ throw new Error('Circular references are not supported.');
+ }
+ if (seen.has(input)) {
+ return seen.get(input);
+ }
+ const result = mapFn(input);
+ if (result.recurse && result.value !== null) {
+ throw new Error('A deep map function may not return both a value and recurse=true.');
+ }
+ if (!result.recurse) {
+ seen.set(input, result.value);
+ return result.value;
+ }
+ else if (isIterable(input)) {
+ // tslint:disable-next-line:no-any
+ const mappedIterable = Array.isArray(input) ? [] : {};
+ containedIn.add(input);
+ for (const k in input) {
+ const child = input[k];
+ const childResult = deepMapInternal(child, mapFn, seen, containedIn);
+ mappedIterable[k] = childResult;
+ }
+ containedIn.delete(input);
+ return mappedIterable;
+ }
+ else {
+ throw new Error(`Can't recurse into non-iterable type: ${input}`);
+ }
+}
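+// Illustrative sketch of a mapFn for deepMap(): replace leaves, recurse into
+// containers. Doubling every number in a nested structure:
+//
+//   const doubled = deepMap({a: 1, b: [2, 3]}, node =>
+//       typeof node === 'number' ? {value: node * 2, recurse: false} :
+//                                  {value: null, recurse: true});
+//   // doubled === {a: 2, b: [4, 6]}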
+// TODO(soergel, kangyizhang) Reconsider naming of deepZip() to avoid confusion
+// with zip()
+/**
+ * Zip nested structures together in a recursive manner.
+ *
+ * This has the effect of transposing or pivoting data, e.g. converting it from
+ * a row-major representation to a column-major representation.
+ *
+ * For example, `deepZip([{a: 1, b: 2}, {a: 3, b: 4}])` returns
+ * `{a: [1, 3], b: [2, 4]}`.
+ *
+ * The inputs should all have the same nested structure (i.e., of arrays and
+ * dicts). The result is a single object with the same nested structure, where
+ * the leaves are arrays collecting the values of the inputs at that location
+ * (or, optionally, the result of a custom function applied to those arrays).
+ *
+ * @param inputs: An array of the objects to zip together.
+ * @param zipFn: (optional) A function that expects an array of elements at a
+ * single node of the object tree, and returns a `DeepMapResult`. The
+ * `DeepMapResult` either provides a result value for that node (i.e.,
+ * representing the subtree), or indicates that the node should be processed
+ * recursively. The default zipFn recurses as far as possible and places
+ * arrays at the leaves.
+ */
+function deepZip(inputs, zipFn = zipToList) {
+ return deepZipInternal(inputs, zipFn);
+}
+/**
+ * @param containedIn: A set containing objects on the reference path currently
+ * being processed (used to detect cycles).
+ */
+function deepZipInternal(inputs, zipFn, containedIn = new Set()) {
+ // The recursion follows the structure of input 0; it's assumed that all the
+ // other inputs have the same structure.
+ const input = inputs[0];
+ if (containedIn.has(input)) {
+ throw new Error('Circular references are not supported.');
+ }
+ const result = zipFn(inputs);
+ if (result.recurse && result.value !== null) {
+ throw new Error('A deep zip function may not return both a value and recurse=true.');
+ }
+ if (!result.recurse) {
+ return result.value;
+ }
+ else if (isIterable(input)) {
+ // tslint:disable-next-line:no-any
+ const mappedIterable = Array.isArray(input) ? [] : {};
+ containedIn.add(input);
+ for (const k in input) {
+ const children = inputs.map(x => x[k]);
+ const childResult = deepZipInternal(children, zipFn, containedIn);
+ mappedIterable[k] = childResult;
+ }
+ containedIn.delete(input);
+ return mappedIterable;
+ }
+ else {
+ throw new Error(`Can't recurse into non-iterable type: ${input}`);
+ }
+}
+// tslint:disable-next-line:no-any
+function zipToList(x) {
+ if (x === null) {
+ return null;
+ }
+ // TODO(soergel): validate array type?
+ if (isIterable(x[0])) {
+ return { value: null, recurse: true };
+ }
+ else {
+ return { value: x, recurse: false };
+ }
+}
+/**
+ * Apply an async mapping function to a nested structure in a recursive manner.
+ *
+ * This first creates a nested structure of Promises, and then awaits all of
+ * those, resulting in a single Promise for a resolved nested structure.
+ *
+ * The result of the mapping is an object with the same nested structure (i.e.,
+ * of arrays and dicts) as the input, except that some subtrees are replaced,
+ * according to the results of the mapping function.
+ *
+ * Mappings are memoized. Thus, if the nested structure contains the same
+ * object in multiple positions, the output will contain the same mapped object
+ * in those positions. Cycles are not supported, however.
+ *
+ * @param input: The object to which to apply the mapping function.
+ * @param mapFn: A function that expects a single node of the object tree, and
+ * returns a `DeepMapAsyncResult`. The `DeepMapAsyncResult` either provides
+ * a `Promise` for a replacement value for that node (i.e., replacing the
+ * subtree), or indicates that the node should be processed recursively. Note
+ * that the decision whether or not to recurse must be made immediately; only
+ * the mapped value may be promised.
+ */
+async function deepMapAndAwaitAll(input, mapFn) {
+ const seen = new Map();
+ // First do a normal deepMap, collecting Promises in 'seen' as a side effect.
+ deepMapInternal(input, mapFn, seen);
+ // Replace the Promises in 'seen' in place.
+ // Note TypeScript provides no async map iteration, and regular map iteration
+ // is broken too, so sadly we have to do Array.from() to make it work.
+ // (There's no advantage to Promise.all(), and that would be tricky anyway.)
+ for (const key of Array.from(seen.keys())) {
+ const value = seen.get(key);
+ if (value instanceof Promise) {
+ const mappedValue = await value;
+ seen.set(key, mappedValue);
+ }
+ }
+ // Normal deepMap again, this time filling in the resolved values.
+ // It's unfortunate that we have to do two passes.
+ // TODO(soergel): test performance and think harder about a fast solution.
+ const result = deepMapInternal(input, mapFn, seen);
+ return result;
+}
+/**
+ * Determine whether the argument is iterable.
+ *
+ * @returns true if the argument is an array, or any non-Tensor,
+ * non-TypedArray object.
+ */
+// tslint:disable-next-line:no-any
+function isIterable(obj) {
+ return obj != null && (!ArrayBuffer.isView(obj)) &&
+ (Array.isArray(obj) ||
+ (typeof obj === 'object' && !(obj instanceof _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["Tensor"])));
+}
+/**
+ * Determine whether the argument can be converted to Tensor.
+ *
+ * Tensors, primitives, arrays, and TypedArrays all qualify; anything else does
+ * not.
+ *
+ * @returns true if the argument can be converted to Tensor.
+ */
+// tslint:disable-next-line:no-any
+function canTensorify(obj) {
+ return obj == null || isPrimitive(obj) || Array.isArray(obj) ||
+ (typeof obj === 'object' && (obj instanceof _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["Tensor"])) ||
+ _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].isTypedArray(obj);
+}
+/**
+ * Returns true if the given `value` is a primitive type. Otherwise returns
+ * false. This is equivalent to Node's util.isPrimitive.
+ */
+function isPrimitive(value) {
+ return (value === null ||
+ (typeof value !== 'object' && typeof value !== 'function'));
+}
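+// Illustrative examples of canTensorify():
+//   canTensorify([1, 2, 3]);            // true (array)
+//   canTensorify('hello');              // true (primitive)
+//   canTensorify(new Float32Array(2));  // true (TypedArray)
+//   canTensorify(() => 1);              // false (function)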
+//# sourceMappingURL=deep_map.js.map
+
+/***/ }),
+/* 20 */
+/***/ (function(module, exports, __webpack_require__) {
+
+// A library of seedable RNGs implemented in Javascript.
+//
+// Usage:
+//
+// var seedrandom = require('seedrandom');
+// var random = seedrandom(1); // or any seed.
+// var x = random(); // 0 <= x < 1. Every bit is random.
+// var x = random.quick(); // 0 <= x < 1. 32 bits of randomness.
+
+// alea, a 53-bit multiply-with-carry generator by Johannes Baagøe.
+// Period: ~2^116
+// Reported to pass all BigCrush tests.
+var alea = __webpack_require__(68);
+
+// xor128, a pure xor-shift generator by George Marsaglia.
+// Period: 2^128-1.
+// Reported to fail: MatrixRank and LinearComp.
+var xor128 = __webpack_require__(69);
+
+// xorwow, George Marsaglia's 160-bit xor-shift combined plus weyl.
+// Period: 2^192-2^32
+// Reported to fail: CollisionOver, SimpPoker, and LinearComp.
+var xorwow = __webpack_require__(70);
+
+// xorshift7, by François Panneton and Pierre L'ecuyer, takes
+// a different approach: it adds robustness by allowing more shifts
+// than Marsaglia's original three. It is a 7-shift generator
+ // with 256 bits, that passes BigCrush with no systematic failures.
+// Period 2^256-1.
+// No systematic BigCrush failures reported.
+var xorshift7 = __webpack_require__(71);
+
+// xor4096, by Richard Brent, is a 4096-bit xor-shift with a
+// very long period that also adds a Weyl generator. It also passes
+// BigCrush with no systematic failures. Its long period may
+// be useful if you have many generators and need to avoid
+// collisions.
+// Period: 2^4128-2^32.
+// No systematic BigCrush failures reported.
+var xor4096 = __webpack_require__(72);
+
+// Tyche-i, by Samuel Neves and Filipe Araujo, is a bit-shifting random
+// number generator derived from ChaCha, a modern stream cipher.
+// https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf
+// Period: ~2^127
+// No systematic BigCrush failures reported.
+var tychei = __webpack_require__(73);
+
+// The original ARC4-based prng included in this library.
+// Period: ~2^1600
+var sr = __webpack_require__(74);
+
+sr.alea = alea;
+sr.xor128 = xor128;
+sr.xorwow = xorwow;
+sr.xorshift7 = xorshift7;
+sr.xor4096 = xor4096;
+sr.tychei = tychei;
+
+module.exports = sr;
+
+
+/***/ }),
+/* 21 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return DataType; });
+/* unused harmony export SaverDef */
+/**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * =============================================================================
+ */
+/** DataType enum. */
+var DataType;
+(function (DataType) {
+ DataType[DataType["DT_INVALID"] = 0] = "DT_INVALID";
+ DataType[DataType["DT_FLOAT"] = 1] = "DT_FLOAT";
+ DataType[DataType["DT_DOUBLE"] = 2] = "DT_DOUBLE";
+ DataType[DataType["DT_INT32"] = 3] = "DT_INT32";
+ DataType[DataType["DT_UINT8"] = 4] = "DT_UINT8";
+ DataType[DataType["DT_INT16"] = 5] = "DT_INT16";
+ DataType[DataType["DT_INT8"] = 6] = "DT_INT8";
+ DataType[DataType["DT_STRING"] = 7] = "DT_STRING";
+ DataType[DataType["DT_COMPLEX64"] = 8] = "DT_COMPLEX64";
+ DataType[DataType["DT_INT64"] = 9] = "DT_INT64";
+ DataType[DataType["DT_BOOL"] = 10] = "DT_BOOL";
+ DataType[DataType["DT_QINT8"] = 11] = "DT_QINT8";
+ DataType[DataType["DT_QUINT8"] = 12] = "DT_QUINT8";
+ DataType[DataType["DT_QINT32"] = 13] = "DT_QINT32";
+ DataType[DataType["DT_BFLOAT16"] = 14] = "DT_BFLOAT16";
+ DataType[DataType["DT_FLOAT_REF"] = 101] = "DT_FLOAT_REF";
+ DataType[DataType["DT_DOUBLE_REF"] = 102] = "DT_DOUBLE_REF";
+ DataType[DataType["DT_INT32_REF"] = 103] = "DT_INT32_REF";
+ DataType[DataType["DT_UINT8_REF"] = 104] = "DT_UINT8_REF";
+ DataType[DataType["DT_INT16_REF"] = 105] = "DT_INT16_REF";
+ DataType[DataType["DT_INT8_REF"] = 106] = "DT_INT8_REF";
+ DataType[DataType["DT_STRING_REF"] = 107] = "DT_STRING_REF";
+ DataType[DataType["DT_COMPLEX64_REF"] = 108] = "DT_COMPLEX64_REF";
+ DataType[DataType["DT_INT64_REF"] = 109] = "DT_INT64_REF";
+ DataType[DataType["DT_BOOL_REF"] = 110] = "DT_BOOL_REF";
+ DataType[DataType["DT_QINT8_REF"] = 111] = "DT_QINT8_REF";
+ DataType[DataType["DT_QUINT8_REF"] = 112] = "DT_QUINT8_REF";
+ DataType[DataType["DT_QINT32_REF"] = 113] = "DT_QINT32_REF";
+ DataType[DataType["DT_BFLOAT16_REF"] = 114] = "DT_BFLOAT16_REF";
+})(DataType || (DataType = {}));
+var SaverDef;
+(function (SaverDef) {
+ /** CheckpointFormatVersion enum. */
+ let CheckpointFormatVersion;
+ (function (CheckpointFormatVersion) {
+ CheckpointFormatVersion[CheckpointFormatVersion["LEGACY"] = 0] = "LEGACY";
+ CheckpointFormatVersion[CheckpointFormatVersion["V1"] = 1] = "V1";
+ CheckpointFormatVersion[CheckpointFormatVersion["V2"] = 2] = "V2";
+ })(CheckpointFormatVersion = SaverDef.CheckpointFormatVersion || (SaverDef.CheckpointFormatVersion = {}));
+})(SaverDef || (SaverDef = {}));
+//# sourceMappingURL=compiled_api.js.map
+
+/***/ }),
+/* 22 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return real; });
+/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
+/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(6);
+/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(3);
+/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(4);
+/**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+
+
+/**
+ * Returns the real part of a complex (or real) tensor.
+ *
+ * Given a tensor input, this operation returns a tensor of type float that is
+ * the real part of each element in input considered as a complex number.
+ *
+ * If the input is real, it simply makes a clone.
+ *
+ * ```js
+ * const x = tf.complex([-2.25, 3.25], [4.75, 5.75]);
+ * tf.real(x).print();
+ * ```
+ */
+/** @doc {heading: 'Tensors', subheading: 'Creation'} */
+function real_(input) {
+ const $input = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(input, 'input', 'real');
+ const forward = (backend) => {
+ return backend.real($input);
+ };
+ const inputs = { input: $input };
+ return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernelFunc(forward, inputs, null /* gradient */, _kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Real */ "hb"]);
+}
+const real = Object(_operation__WEBPACK_IMPORTED_MODULE_3__[/* op */ "a"])({ real_ });
+//# sourceMappingURL=real.js.map
+
+/***/ }),
+/* 23 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return Rank; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return upcastType; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return sumOutType; });
+/**
+ * @license
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+var Rank;
+(function (Rank) {
+ Rank["R0"] = "R0";
+ Rank["R1"] = "R1";
+ Rank["R2"] = "R2";
+ Rank["R3"] = "R3";
+ Rank["R4"] = "R4";
+ Rank["R5"] = "R5";
+ Rank["R6"] = "R6";
+})(Rank || (Rank = {}));
+// Looks for upcasting types. Used, for example, in operations with mixed dtype
+// inputs.
+var UpcastInt32AndMap;
+(function (UpcastInt32AndMap) {
+ UpcastInt32AndMap["float32"] = "float32";
+ UpcastInt32AndMap["int32"] = "int32";
+ UpcastInt32AndMap["bool"] = "int32";
+ UpcastInt32AndMap["complex64"] = "complex64";
+})(UpcastInt32AndMap || (UpcastInt32AndMap = {}));
+var UpcastBoolAndMap;
+(function (UpcastBoolAndMap) {
+ UpcastBoolAndMap["float32"] = "float32";
+ UpcastBoolAndMap["int32"] = "int32";
+ UpcastBoolAndMap["bool"] = "bool";
+ UpcastBoolAndMap["complex64"] = "complex64";
+})(UpcastBoolAndMap || (UpcastBoolAndMap = {}));
+var UpcastFloat32AndMap;
+(function (UpcastFloat32AndMap) {
+ UpcastFloat32AndMap["float32"] = "float32";
+ UpcastFloat32AndMap["int32"] = "float32";
+ UpcastFloat32AndMap["bool"] = "float32";
+ UpcastFloat32AndMap["complex64"] = "complex64";
+})(UpcastFloat32AndMap || (UpcastFloat32AndMap = {}));
+var UpcastComplex64AndMap;
+(function (UpcastComplex64AndMap) {
+ UpcastComplex64AndMap["float32"] = "complex64";
+ UpcastComplex64AndMap["int32"] = "complex64";
+ UpcastComplex64AndMap["bool"] = "complex64";
+ UpcastComplex64AndMap["complex64"] = "complex64";
+})(UpcastComplex64AndMap || (UpcastComplex64AndMap = {}));
+const upcastTypeMap = {
+ 'float32': UpcastFloat32AndMap,
+ 'int32': UpcastInt32AndMap,
+ 'bool': UpcastBoolAndMap,
+ 'complex64': UpcastComplex64AndMap
+};
+function upcastType(typeA, typeB) {
+ if (typeA === 'string' || typeB === 'string') {
+ if (typeA === 'string' && typeB === 'string') {
+ return 'string';
+ }
+ throw new Error(`Can not upcast ${typeA} with ${typeB}`);
+ }
+ return upcastTypeMap[typeA][typeB];
+}
+/** Returns the output type after summation. */
+function sumOutType(type) {
+ return upcastType(type, 'int32');
+}
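+// Illustrative examples of the upcast tables above:
+//   upcastType('int32', 'bool');         // 'int32'
+//   upcastType('float32', 'int32');      // 'float32'
+//   upcastType('complex64', 'float32');  // 'complex64'
+//   sumOutType('bool');                  // 'int32' (bool sums become counts)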
+//# sourceMappingURL=types.js.map
+
+/***/ }),
+/* 24 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return registerOp; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return getRegisteredOp; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return deregisterOp; });
+/**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const CUSTOM_OPS = {};
+/**
+ * Register an Op for the graph model executor. This allows you to register
+ * a TensorFlow custom op or to override an existing op.
+ *
+ * Here is an example of registering a new MatMul Op.
+ * ```js
+ * const customMatmul = (node) =>
+ * tf.matMul(
+ * node.inputs[0], node.inputs[1],
+ * node.attrs['transpose_a'], node.attrs['transpose_b']);
+ *
+ * tf.registerOp('MatMul', customMatmul);
+ * ```
+ * The inputs and attrs of the node object are based on the TensorFlow op
+ * registry.
+ *
+ * @param name The Tensorflow Op name.
+ * @param opFunc An op function which is called with the current graph node
+ * during execution and needs to return a tensor or a list of tensors. The node
+ * has the following attributes:
+ * - attr: A map from attribute name to its value
+ * - inputs: A list of input tensors
+ */
+/** @doc {heading: 'Models', subheading: 'Op Registry'} */
+function registerOp(name, opFunc) {
+ const opMapper = {
+ tfOpName: name,
+ category: 'custom',
+ inputs: [],
+ attrs: [],
+ customExecutor: opFunc
+ };
+ CUSTOM_OPS[name] = opMapper;
+}
+/**
+ * Retrieve the OpMapper object for the registered op.
+ *
+ * @param name The Tensorflow Op name.
+ */
+/** @doc {heading: 'Models', subheading: 'Op Registry'} */
+function getRegisteredOp(name) {
+ return CUSTOM_OPS[name];
+}
+/**
+ * Deregister the Op for graph model executor.
+ *
+ * @param name The Tensorflow Op name.
+ */
+/** @doc {heading: 'Models', subheading: 'Op Registry'} */
+function deregisterOp(name) {
+ delete CUSTOM_OPS[name];
+}
+//# sourceMappingURL=register.js.map
+
+/***/ }),
+/* 25 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return imag; });
+/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
+/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(6);
+/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(3);
+/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(4);
+/**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+
+
+/**
+ * Returns the imaginary part of a complex (or real) tensor.
+ *
+ * Given a tensor input, this operation returns a tensor of type float that is
+ * the imaginary part of each element in input considered as a complex number.
+ * If input is real, a tensor of all zeros is returned.
+ *
+ * ```js
+ * const x = tf.complex([-2.25, 3.25], [4.75, 5.75]);
+ * tf.imag(x).print();
+ * ```
+ */
+/** @doc {heading: 'Tensors', subheading: 'Creation'} */
+function imag_(input) {
+ const $input = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(input, 'input', 'imag');
+ const forward = (backend) => {
+ return backend.imag($input);
+ };
+ const inputs = { input: $input };
+ return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernelFunc(forward, inputs, null /* gradient */, _kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Imag */ "K"]);
+}
+const imag = Object(_operation__WEBPACK_IMPORTED_MODULE_3__[/* op */ "a"])({ imag_ });
+//# sourceMappingURL=imag.js.map
+
+/***/ }),
+/* 26 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return pool; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return maxPoolPositions; });
+/* harmony import */ var _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(0);
+/**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+function pool(xValues, xShape, dtype, strides, convInfo, poolType) {
+ const strideHeight = convInfo.strideHeight;
+ const strideWidth = convInfo.strideWidth;
+ const dilationHeight = convInfo.dilationHeight;
+ const dilationWidth = convInfo.dilationWidth;
+ const effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ const effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ const padTop = convInfo.padInfo.top;
+ const padLeft = convInfo.padInfo.left;
+ const initialValue = (poolType === 'max' ? Number.NEGATIVE_INFINITY :
+ Number.POSITIVE_INFINITY);
+ const output = Object(_tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["buffer"])(convInfo.outShape, dtype);
+ const outputVals = output.values;
+ const outputBatchStrides = convInfo.outShape[1] * convInfo.outShape[2] * convInfo.outShape[3];
+ const outputRowStrides = convInfo.outShape[2] * convInfo.outShape[3];
+ const outputColStrides = convInfo.outShape[3];
+ for (let b = 0; b < convInfo.batchSize; ++b) {
+ const outputBatchOffset = b * outputBatchStrides;
+ const inputBatchOffset = b * strides[0];
+ for (let d = 0; d < convInfo.inChannels; ++d) {
+ for (let yR = 0; yR < convInfo.outHeight; ++yR) {
+ const xRCorner = yR * strideHeight - padTop;
+ const xRMin = Math.max(0, xRCorner);
+ const xRMax = Math.min(convInfo.inHeight, effectiveFilterHeight + xRCorner);
+ const outputRowOffset = outputBatchOffset + yR * outputRowStrides;
+ for (let yC = 0; yC < convInfo.outWidth; ++yC) {
+ const xCCorner = yC * strideWidth - padLeft;
+ const xCMin = Math.max(0, xCCorner);
+ const xCMax = Math.min(convInfo.inWidth, effectiveFilterWidth + xCCorner);
+ let minMaxValue = initialValue;
+ let avgValue = 0;
+ let count = 0;
+ for (let xR = xRMin; xR < xRMax; xR += dilationHeight) {
+ const xROffset = inputBatchOffset + xR * strides[1];
+ for (let xC = xCMin; xC < xCMax; xC += dilationWidth) {
+ const xCOffset = xROffset + xC * strides[2];
+ const pixel = xValues[xCOffset + d];
+ if ((poolType === 'max' && pixel > minMaxValue)) {
+ minMaxValue = pixel;
+ }
+ else if (poolType === 'avg') {
+ avgValue += pixel;
+ count++;
+ }
+ }
+ if (isNaN(minMaxValue)) {
+ break;
+ }
+ }
+ const outputOffset = outputRowOffset + yC * outputColStrides + d;
+ outputVals[outputOffset] =
+ poolType === 'avg' ? avgValue / count : minMaxValue;
+ }
+ }
+ }
+ }
+ return output;
+}
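+// A note on the layout assumed above (NHWC): `strides` is the row-major
+// stride vector of x, e.g. for xShape = [2, 4, 4, 3],
+// util.computeStrides(xShape) === [48, 12, 3], so
+// xValues[b*strides[0] + r*strides[1] + c*strides[2] + d] addresses
+// pixel (b, r, c, d).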
+function maxPoolPositions(xValues, xShape, dtype, convInfo, flattenPositions = false, includeBatchInIndex = false) {
+ const maxPositions = Object(_tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["buffer"])(convInfo.outShape, 'int32');
+ const strideHeight = convInfo.strideHeight;
+ const strideWidth = convInfo.strideWidth;
+ const dilationHeight = convInfo.dilationHeight;
+ const dilationWidth = convInfo.dilationWidth;
+ const effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ const effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ const padTop = convInfo.padInfo.top;
+ const padLeft = convInfo.padInfo.left;
+ const xBuf = Object(_tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["buffer"])(xShape, dtype, xValues);
+ for (let b = 0; b < convInfo.batchSize; ++b) {
+ for (let d = 0; d < convInfo.inChannels; ++d) {
+ for (let yR = 0; yR < convInfo.outHeight; ++yR) {
+ const xRCorner = yR * strideHeight - padTop;
+ let xRMin = xRCorner;
+ while (xRMin < 0) {
+ xRMin += dilationHeight;
+ }
+ // Stepping xRMin up by dilationHeight (rather than clamping with
+ // Math.max(0, xRCorner)) keeps it aligned with the dilation grid.
+ const xRMax = Math.min(convInfo.inHeight, effectiveFilterHeight + xRCorner);
+ for (let yC = 0; yC < convInfo.outWidth; ++yC) {
+ const xCCorner = yC * strideWidth - padLeft;
+ let xCMin = xCCorner;
+ while (xCMin < 0) {
+ xCMin += dilationWidth;
+ }
+ const xCMax = Math.min(convInfo.inWidth, effectiveFilterWidth + xCCorner);
+ let maxValue = Number.NEGATIVE_INFINITY;
+ let maxPosition = -1;
+ for (let xR = xRMin; xR < xRMax; xR += dilationHeight) {
+ const wR = xR - xRCorner;
+ for (let xC = xCMin; xC < xCMax; xC += dilationWidth) {
+ const wC = xC - xCCorner;
+ const pixel = xBuf.get(b, xR, xC, d);
+ if (pixel > maxValue) {
+ maxValue = pixel;
+ if (flattenPositions) {
+ maxPosition = includeBatchInIndex ?
+ ((b * convInfo.inHeight + xR) * convInfo.inWidth + xC) *
+ convInfo.inChannels +
+ d :
+ (xR * convInfo.inWidth + xC) * convInfo.inChannels + d;
+ }
+ else {
+ maxPosition = wR * effectiveFilterWidth + wC;
+ }
+ }
+ }
+ }
+ maxPositions.set(maxPosition, b, yR, yC, d);
+ }
+ }
+ }
+ }
+ return maxPositions;
+}
+//# sourceMappingURL=pool_utils.js.map
+
+/***/ }),
+/* 27 */
+/***/ (function(module, exports) {
+
+var g;
+
+// This works in non-strict mode
+g = (function() {
+ return this;
+})();
+
+try {
+ // This works if eval is allowed (see CSP)
+ g = g || new Function("return this")();
+} catch (e) {
+ // This works if the window reference is available
+ if (typeof window === "object") g = window;
+}
+
+// g can still be undefined, but nothing to do about it...
+// We return undefined, instead of nothing here, so it's
+// easier to handle this case. if(!global) { ...}
+
+module.exports = g;
+
+
+/***/ }),
+/* 28 */
+/***/ (function(module, exports) {
+
+module.exports = function(module) {
+ if (!module.webpackPolyfill) {
+ module.deprecate = function() {};
+ module.paths = [];
+ // module.parent = undefined by default
+ if (!module.children) module.children = [];
+ Object.defineProperty(module, "loaded", {
+ enumerable: true,
+ get: function() {
+ return module.l;
+ }
+ });
+ Object.defineProperty(module, "id", {
+ enumerable: true,
+ get: function() {
+ return module.i;
+ }
+ });
+ module.webpackPolyfill = 1;
+ }
+ return module;
+};
+
+
+/***/ }),
+/* 29 */
+/***/ (function(module, exports) {
+
+/* WEBPACK VAR INJECTION */(function(__webpack_amd_options__) {/* globals __webpack_amd_options__ */
+module.exports = __webpack_amd_options__;
+
+/* WEBPACK VAR INJECTION */}.call(this, {}))
+
+/***/ }),
+/* 30 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return transposeImpl; });
+/* harmony import */ var _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(0);
+/**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+function transposeImpl(xVals, xShape, dtype, perm, newShape) {
+ const xRank = xShape.length;
+ const xSize = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].sizeFromShape(xShape);
+ const xStrides = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].computeStrides(xShape);
+ const newStrides = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].computeStrides(newShape);
+ const result = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].getTypedArrayFromDType(dtype, _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].sizeFromShape(newShape));
+ for (let i = 0; i < xSize; ++i) {
+ const loc = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].indexToLoc(i, xRank, xStrides);
+ // Permute location.
+ const newLoc = new Array(loc.length);
+ for (let i = 0; i < newLoc.length; i++) {
+ newLoc[i] = loc[perm[i]];
+ }
+ const newIndex = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].locToIndex(newLoc, xRank, newStrides);
+ result[newIndex] = xVals[i];
+ }
+ return result;
+}
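+// Illustrative sketch: transposing a 2x3 matrix with perm = [1, 0].
+//
+//   const xVals = Float32Array.from([1, 2, 3, 4, 5, 6]); // [[1,2,3],[4,5,6]]
+//   transposeImpl(xVals, [2, 3], 'float32', [1, 0], [3, 2]);
+//   // -> Float32Array [1, 4, 2, 5, 3, 6], i.e. [[1,4],[2,5],[3,6]]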
+//# sourceMappingURL=Transpose_impl.js.map
+
+/***/ }),
+/* 31 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+
+// EXPORTS
+__webpack_require__.d(__webpack_exports__, "a", function() { return /* reexport */ backend_cpu_MathBackendCPU; });
+__webpack_require__.d(__webpack_exports__, "c", function() { return /* reexport */ version; });
+__webpack_require__.d(__webpack_exports__, "b", function() { return /* reexport */ shared_namespaceObject; });
+
+// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-backend-cpu/dist/shared.js
+var shared_namespaceObject = {};
+__webpack_require__.r(shared_namespaceObject);
+__webpack_require__.d(shared_namespaceObject, "maxImpl", function() { return Max_impl["a" /* maxImpl */]; });
+__webpack_require__.d(shared_namespaceObject, "transposeImpl", function() { return Transpose_impl["a" /* transposeImpl */]; });
+
+// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Max_impl.js
+var Max_impl = __webpack_require__(37);
+
+// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-backend-cpu/dist/kernels/Transpose_impl.js
+var Transpose_impl = __webpack_require__(30);
+
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-backend-cpu/dist/shared.js
+/**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+// Shared kernel impls for use in other backends.
+
+
+//# sourceMappingURL=shared.js.map
+// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/index.js + 269 modules
+var dist = __webpack_require__(0);
+
+// EXTERNAL MODULE: ./node_modules/seedrandom/index.js
+var seedrandom = __webpack_require__(20);
+
+// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-backend-cpu/dist/cpu_util.js
+var cpu_util = __webpack_require__(9);
+
+// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-backend-cpu/dist/utils/pool_utils.js
+var pool_utils = __webpack_require__(26);
+
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-backend-cpu/dist/backend_cpu.js
+/**
+ * @license
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+
+
+
+const nonMaxSuppressionV3 = dist["kernel_impls"].nonMaxSuppressionV3;
+const split = dist["kernel_impls"].split;
+const tile = dist["kernel_impls"].tile;
+const topkImpl = dist["kernel_impls"].topkImpl;
+const whereImpl = dist["kernel_impls"].whereImpl;
+
+
+
+function mapActivation(backend, x, activation, preluActivationWeights) {
+ if (activation === 'linear') {
+ return backend.linear(x);
+ }
+ else if (activation === 'relu') {
+ return backend.relu(x);
+ }
+ else if (activation === 'elu') {
+ return backend.elu(x);
+ }
+ else if (activation === 'relu6') {
+ return backend.relu6(x);
+ }
+ else if (activation === 'prelu') {
+ return backend.prelu(x, preluActivationWeights);
+ }
+ throw new Error(`Activation ${activation} has not been implemented for the CPU backend.`);
+}
+class backend_cpu_MathBackendCPU extends dist["KernelBackend"] {
+ constructor() {
+ super();
+ this.blockSize = 48;
+ this.firstUse = true;
+ this.data = new dist["DataStorage"](this, Object(dist["engine"])());
+ }
+ write(values, shape, dtype) {
+ if (this.firstUse) {
+ this.firstUse = false;
+ if (Object(dist["env"])().get('IS_NODE')) {
+ dist["backend_util"].warn('\n============================\n' +
+ 'Hi there 👋. Looks like you are running TensorFlow.js in ' +
+ 'Node.js. To speed things up dramatically, install our node ' +
+ 'backend, which binds to TensorFlow C++, by running ' +
+ 'npm i @tensorflow/tfjs-node, ' +
+ 'or npm i @tensorflow/tfjs-node-gpu if you have CUDA. ' +
+ 'Then call require(\'@tensorflow/tfjs-node\'); (-gpu ' +
+ 'suffix for CUDA) at the start of your program. ' +
+ 'Visit https://github.com/tensorflow/tfjs-node for more details.' +
+ '\n============================');
+ }
+ }
+ const dataId = {};
+ this.data.set(dataId, { values, dtype });
+ return dataId;
+ }
+ move(dataId, values, shape, dtype) {
+ this.data.set(dataId, { values, dtype });
+ }
+ numDataIds() {
+ return this.data.numDataIds();
+ }
+ async read(dataId) {
+ return this.readSync(dataId);
+ }
+ readSync(dataId) {
+ const { dtype, complexTensors } = this.data.get(dataId);
+ if (dtype === 'complex64') {
+ const realValues = this.readSync(complexTensors.real.dataId);
+ const imagValues = this.readSync(complexTensors.imag.dataId);
+ return dist["backend_util"].mergeRealAndImagArrays(realValues, imagValues);
+ }
+ return this.data.get(dataId).values;
+ }
+ bufferSync(t) {
+ const data = this.readSync(t.dataId);
+ let decodedData = data;
+ if (t.dtype === 'string') {
+ try {
+ // Decode the bytes into string.
+ decodedData = data.map(d => dist["util"].decodeString(d));
+ }
+ catch (_a) {
+ throw new Error('Failed to decode encoded string bytes into utf-8');
+ }
+ }
+ return dist["buffer"](t.shape, t.dtype, decodedData);
+ }
+ makeOutput(values, shape, dtype) {
+ const dataId = this.write(values, shape, dtype);
+ return Object(dist["engine"])().makeTensorFromDataId(dataId, shape, dtype, this);
+ }
+ disposeData(dataId) {
+ if (this.data.has(dataId)) {
+ const { complexTensors } = this.data.get(dataId);
+ if (complexTensors != null) {
+ complexTensors.real.dispose();
+ complexTensors.imag.dispose();
+ }
+ this.data.delete(dataId);
+ }
+ }
+ async time(f) {
+ const start = dist["util"].now();
+ f();
+ const kernelMs = dist["util"].now() - start;
+ return { kernelMs };
+ }
+ memory() {
+ return {
+ // Unreliable due to automatic gc. The reported numbers are cumulative.
+ unreliable: true,
+ reasons: ['The reported memory is an upper bound. Due to automatic garbage ' +
+ 'collection, the true allocated memory may be less.']
+ };
+ }
+ complex(real, imag) {
+ const result = this.makeOutput(null, real.shape, 'complex64');
+ const resultData = this.data.get(result.dataId);
+ // The backend owns the reference to the underlying real and imaginary
+ // clones. These will explicitly get disposed when the complex tensor is
+ // disposed.
+ resultData.complexTensors = {
+ real: Object(dist["engine"])().keep(real.clone()),
+ imag: Object(dist["engine"])().keep(imag.clone())
+ };
+ return result;
+ }
+ real(input) {
+ const resultData = this.data.get(input.dataId);
+ return resultData.complexTensors.real.clone();
+ }
+ imag(input) {
+ const resultData = this.data.get(input.dataId);
+ return resultData.complexTensors.imag.clone();
+ }
+ slice(x, begin, size) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'slice');
+ const isContinuous = dist["slice_util"].isSliceContinous(x.shape, begin, size);
+ if (isContinuous) {
+ const flatOffset = dist["slice_util"].computeFlatOffset(begin, x.strides);
+ const length = dist["util"].sizeFromShape(size);
+ const vals = this.readSync(x.dataId);
+ return dist["tensor"](vals.subarray(flatOffset, flatOffset + length), size, x.dtype);
+ }
+ const buffer = dist["buffer"](size, x.dtype);
+ const xBuf = this.bufferSync(x);
+ for (let i = 0; i < buffer.size; ++i) {
+ const loc = buffer.indexToLoc(i);
+ const xLoc = loc.map((idx, j) => idx + begin[j]);
+ buffer.values[i] = xBuf.get(...xLoc);
+ }
+ return buffer.toTensor();
+ }
+ stridedSlice(x, begin, end, strides) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'stridedSlice');
+ const outShape = dist["slice_util"].computeOutShape(begin, end, strides);
+ if (outShape.some(axis => axis === 0)) {
+ return dist["tensor"]([], outShape);
+ }
+ const buffer = dist["buffer"](outShape, x.dtype);
+ const xBuf = this.bufferSync(x);
+ for (let i = 0; i < buffer.size; i++) {
+ const loc = buffer.indexToLoc(i);
+ const newLoc = new Array(loc.length);
+ for (let j = 0; j < newLoc.length; j++) {
+ newLoc[j] = loc[j] * strides[j] + begin[j];
+ }
+ buffer.set(xBuf.get(...newLoc), ...loc);
+ }
+ return buffer.toTensor();
+ }
+ diag(x) {
+ const xVals = this.readSync(x.dataId);
+ const buffer = dist["buffer"]([x.size, x.size], x.dtype);
+ const vals = buffer.values;
+ for (let i = 0; i < xVals.length; i++) {
+ vals[i * x.size + i] = xVals[i];
+ }
+ return buffer.toTensor();
+ }
+ unstack(x, axis) {
+ const num = x.shape[axis];
+ const outShape = new Array(x.rank - 1);
+ let outIndex = 0;
+ for (let i = 0; i < x.rank; i++) {
+ if (i !== axis) {
+ outShape[outIndex++] = x.shape[i];
+ }
+ }
+ const begin = new Array(x.rank).fill(0);
+ const size = x.shape.slice();
+ size[axis] = 1;
+ const res = new Array(num);
+ for (let i = 0; i < res.length; i++) {
+ begin[axis] = i;
+ res[i] = this.slice(x, begin, size).reshape(outShape);
+ }
+ return res;
+ }
+ reverse(x, axis) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'reverse');
+ const buffer = dist["buffer"](x.shape, x.dtype);
+ const xBuf = this.bufferSync(x);
+ for (let i = 0; i < buffer.size; i++) {
+ const outLoc = buffer.indexToLoc(i);
+ const inLoc = outLoc.slice();
+ axis.forEach(ax => inLoc[ax] = x.shape[ax] - 1 - inLoc[ax]);
+ buffer.set(xBuf.get(...inLoc), ...outLoc);
+ }
+ return buffer.toTensor();
+ }
+ concat(tensors, axis) {
+ if (tensors[0].dtype === 'complex64') {
+ const reals = tensors.map((t) => dist["real"](t));
+ const imags = tensors.map((t) => dist["imag"](t));
+ return dist["complex"](this.concat(reals, axis), this.concat(imags, axis));
+ }
+ const tensors2D = tensors.map(t => {
+ const innerSize = dist["util"].sizeFromShape(t.shape.slice(axis));
+ return t.as2D(-1, innerSize);
+ });
+ const outShape = dist["backend_util"].computeOutShape(tensors2D.map(t => t.shape), 1 /* axis
+ */);
+ const values = dist["buffer"](outShape, tensors[0].dtype)
+ .values;
+ if (tensors2D[0].shape[0] === 1) {
+ // Use built-in TypedArray.set() method for speed.
+ let offset = 0;
+ tensors2D.forEach(t => {
+ values.set(this.readSync(t.dataId), offset);
+ offset += t.size;
+ });
+ }
+ else {
+ let colOffset = 0;
+ tensors2D.forEach(t => {
+ const tVals = this.readSync(t.dataId);
+ let tIdx = 0;
+ for (let row = 0; row < t.shape[0]; ++row) {
+ const resIdx = row * outShape[1] + colOffset;
+ for (let col = 0; col < t.shape[1]; ++col) {
+ values[resIdx + col] = tVals[tIdx++];
+ }
+ }
+ colOffset += t.shape[1];
+ });
+ }
+ const finalOutShape = dist["backend_util"].computeOutShape(tensors.map(t => t.shape), axis);
+ return dist["tensor"](values, finalOutShape, tensors[0].dtype);
+ }
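+ // Design note: the as2D(-1, innerSize) reshape above flattens every input
+ // so that concatenation along an arbitrary axis reduces to copying column
+ // blocks into one 2D buffer; the result is then rebuilt with the true
+ // output shape via computeOutShape(..., axis).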
+ neg(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'neg');
+ return this.multiply(dist["scalar"](-1), x);
+ }
+ add(a, b) {
+ if (a.dtype === 'complex64' || b.dtype === 'complex64') {
+ return this.broadcastedBinaryComplexOp(a.cast('complex64'), b.cast('complex64'), (aReal, aImag, bReal, bImag) => {
+ return { real: aReal + bReal, imag: aImag + bImag };
+ });
+ }
+ return this.broadcastedBinaryOp(a, b, Object(dist["upcastType"])(a.dtype, b.dtype), (aValue, bValue) => aValue + bValue);
+ }
+ addN(tensors) {
+ Object(cpu_util["a" /* assertNotComplex */])(tensors, 'addN');
+ const vals = tensors.map(t => this.readSync(t.dataId));
+ const result = dist["buffer"](tensors[0].shape, tensors[0].dtype);
+ const resultVals = result.values;
+ for (let i = 0; i < tensors.length; i++) {
+ const currVals = vals[i];
+ for (let j = 0; j < resultVals.length; j++) {
+ resultVals[j] += currVals[j];
+ }
+ }
+ return result.toTensor();
+ }
+ softmax(logits, dim) {
+ const axes = dist["util"].parseAxisParam([dim], logits.shape);
+ // TODO(annxingyuan): Call maxImpl rather than op as part of softmax kernel
+ // modularization.
+ const maxLogit = Object(dist["max"])(logits, axes);
+ const expandedShape = dist["backend_util"].expandShapeToKeepDim(maxLogit.shape, axes);
+ const a = this.subtract(logits, maxLogit.reshape(expandedShape));
+ const b = this.exp(a);
+ const sumExp = this.sum(b, axes).reshape(expandedShape);
+ // TODO(annxingyuan): Call divImpl rather than op as part of softmax
+ // kernel modularization.
+ return dist["div"](b, sumExp);
+ }
+ subtract(a, b) {
+ if (a.dtype === 'complex64' || b.dtype === 'complex64') {
+ return this.broadcastedBinaryComplexOp(a.cast('complex64'), b.cast('complex64'), (aReal, aImag, bReal, bImag) => {
+ return { real: aReal - bReal, imag: aImag - bImag };
+ });
+ }
+ return this.broadcastedBinaryOp(a, b, Object(dist["upcastType"])(a.dtype, b.dtype), (aValue, bValue) => aValue - bValue);
+ }
+ pow(a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'pow');
+ return this.broadcastedBinaryOp(a, b, a.dtype, (aValue, bValue) => Math.pow(aValue, bValue));
+ }
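+ // Cache-blocked (tiled) matrix multiply: the i/j/k loops advance in
+ // blockSize-sized tiles so each tile of a, b and the output stays hot in
+ // cache while partial sums accumulate into resVals. With a hypothetical
+ // blockSize of 48, a 100x100 matmul walks ceil(100/48) = 3 tiles per
+ // dimension, and the Math.min clamps below handle the ragged final tiles.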
+ batchMatMul(a, b, transposeA, transposeB) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'matMul');
+ const sharedDim = transposeA ? a.shape[1] : a.shape[2];
+ const leftDim = transposeA ? a.shape[2] : a.shape[1];
+ const rightDim = transposeB ? b.shape[1] : b.shape[2];
+ const batchDim = a.shape[0];
+ const aValues = this.readSync(a.dataId);
+ const bValues = this.readSync(b.dataId);
+ const [aBatch, aOuterStep, aInnerStep] = transposeA ?
+ [a.strides[0], 1, a.strides[1]] :
+ [a.strides[0], a.strides[1], 1];
+ const [bInnerStep, bOuterStep, bBatch] = transposeB ?
+ [1, b.strides[1], b.strides[0]] :
+ [b.strides[1], 1, b.strides[0]];
+ const size = leftDim * rightDim;
+ const result = dist["buffer"]([batchDim, leftDim, rightDim], a.dtype);
+ const resVals = result.values;
+ const blockSize = this.blockSize;
+ for (let b = 0; b < batchDim; b++) {
+ for (let i0 = 0; i0 < leftDim; i0 += blockSize) {
+ for (let j0 = 0; j0 < rightDim; j0 += blockSize) {
+ for (let k0 = 0; k0 < sharedDim; k0 += blockSize) {
+ // Clamp tile ends for when blockSize doesn't evenly divide the input.
+ const iBlock = Math.min(i0 + blockSize, leftDim);
+ const jBlock = Math.min(j0 + blockSize, rightDim);
+ const kBlock = Math.min(k0 + blockSize, sharedDim);
+ for (let i = i0; i < iBlock; i++) {
+ for (let j = j0; j < jBlock; j++) {
+ let sum = 0.0;
+ for (let k = k0; k < kBlock; k++) {
+ sum += aValues[b * aBatch + i * aOuterStep + k * aInnerStep] *
+ bValues[k * bInnerStep + j * bOuterStep + b * bBatch];
+ }
+ resVals[b * size + (i * rightDim + j)] += sum;
+ }
+ }
+ }
+ }
+ }
+ }
+ return result.toTensor();
+ }
+ fusedBatchMatMul({ a, b, transposeA, transposeB, bias, activation, preluActivationWeights }) {
+ let result = this.batchMatMul(a, b, transposeA, transposeB);
+ if (bias) {
+ result = this.add(result, bias);
+ }
+ if (activation) {
+ result =
+ mapActivation(this, result, activation, preluActivationWeights);
+ }
+ return result;
+ }
+ multiply(a, b) {
+ if (a.dtype === 'complex64' || b.dtype === 'complex64') {
+ return this.broadcastedBinaryComplexOp(a.cast('complex64'), b.cast('complex64'), (aReal, aImag, bReal, bImag) => {
+ return {
+ real: aReal * bReal - aImag * bImag,
+ imag: aReal * bImag + aImag * bReal
+ };
+ });
+ }
+ return this.broadcastedBinaryOp(a, b, Object(dist["upcastType"])(a.dtype, b.dtype), (aValue, bValue) => aValue * bValue);
+ }
+ floorDiv(a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'floorDiv');
+ const op = (a, b) => Math.floor(a / b);
+ const outputDtype = 'int32';
+ return this.broadcastedBinaryOp(a, b, outputDtype, op);
+ }
+ sum(x, axes) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'sum');
+ dist["backend_util"].assertAxesAreInnerMostDims('sum', axes, x.rank);
+ const [outShape, reduceShape] = dist["backend_util"].computeOutAndReduceShapes(x.shape, axes);
+ const resultDtype = Object(dist["upcastType"])(x.dtype, 'int32');
+ const result = dist["zeros"](outShape, resultDtype);
+ const reduceSize = dist["util"].sizeFromShape(reduceShape);
+ const vals = this.readSync(result.dataId);
+ const aVals = this.readSync(x.dataId);
+ for (let i = 0; i < vals.length; ++i) {
+ const offset = i * reduceSize;
+ let sum = 0;
+ for (let j = 0; j < reduceSize; ++j) {
+ sum += aVals[offset + j];
+ }
+ vals[i] = sum;
+ }
+ return result;
+ }
+ prod(x, axes) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'sum');
+ const [outShape, reduceShape] = dist["backend_util"].computeOutAndReduceShapes(x.shape, axes);
+ const resultDtype = Object(dist["upcastType"])(x.dtype, 'int32');
+ const result = dist["zeros"](outShape, resultDtype);
+ const reduceSize = dist["util"].sizeFromShape(reduceShape);
+ const vals = this.readSync(result.dataId);
+ const aVals = this.readSync(x.dataId);
+ for (let i = 0; i < vals.length; ++i) {
+ const offset = i * reduceSize;
+ let prod = 1;
+ for (let j = 0; j < reduceSize; ++j) {
+ prod *= aVals[offset + j];
+ }
+ vals[i] = prod;
+ }
+ return result;
+ }
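+ // unsortedSegmentSum sums slices of x that share a segment id, e.g.
+ // x = [1, 2, 3, 4] with segmentIds = [0, 0, 1, 1] and numSegments = 2
+ // yields [3, 7]. The loop below builds a 0/1 mask per segment id and
+ // reduces mask * x, trading extra work for a simple broadcast-based kernel.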
+ unsortedSegmentSum(x, segmentIds, numSegments) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'unsortedSegmentSum');
+ const res = [];
+ // Reshape the segment ids so that they can be broadcast with
+ // x. The new shape should be [segmentIds.shape, 1, ..., 1].
+ const numIters = x.rank - segmentIds.rank;
+ for (let i = 0; i < numIters; ++i) {
+ segmentIds = segmentIds.expandDims(i + 1);
+ }
+ for (let i = 0; i < numSegments; ++i) {
+ const segmentId = dist["scalar"](i, 'int32');
+ const mask = dist["equal"](segmentId, segmentIds).asType('float32');
+ const sum = mask.mul(x).sum(0);
+ res.push(sum);
+ }
+ return dist["stack"](res);
+ }
+ argMin(x, axis) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'argMin');
+ const axes = [axis];
+ dist["backend_util"].assertAxesAreInnerMostDims('argMin', axes, x.rank);
+ const [outShape, reduceShape] = dist["backend_util"].computeOutAndReduceShapes(x.shape, axes);
+ const result = dist["zeros"](outShape, 'int32');
+ const reduceSize = dist["util"].sizeFromShape(reduceShape);
+ const vals = this.readSync(result.dataId);
+ const aVals = this.readSync(x.dataId);
+ for (let i = 0; i < vals.length; ++i) {
+ const offset = i * reduceSize;
+ let min = aVals[offset];
+ let minIndex = 0;
+ for (let j = 0; j < reduceSize; ++j) {
+ const value = aVals[offset + j];
+ if (value < min) {
+ min = value;
+ minIndex = j;
+ }
+ }
+ vals[i] = minIndex;
+ }
+ return result;
+ }
+ argMax(x, axis) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'argMax');
+ const axes = [axis];
+ dist["backend_util"].assertAxesAreInnerMostDims('argMax', axes, x.rank);
+ const [outShape, reduceShape] = dist["backend_util"].computeOutAndReduceShapes(x.shape, axes);
+ const result = dist["zeros"](outShape, 'int32');
+ const reduceSize = dist["util"].sizeFromShape(reduceShape);
+ const vals = this.readSync(result.dataId);
+ const aVals = this.readSync(x.dataId);
+ for (let i = 0; i < vals.length; ++i) {
+ const offset = i * reduceSize;
+ let max = aVals[offset];
+ let maxIndex = 0;
+ for (let j = 0; j < reduceSize; ++j) {
+ const value = aVals[offset + j];
+ if (value > max) {
+ max = value;
+ maxIndex = j;
+ }
+ }
+ vals[i] = maxIndex;
+ }
+ return result;
+ }
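+ // cumsum along the last axis, e.g. cumsum([1, 2, 3]) = [1, 3, 6];
+ // exclusive shifts the window ([0, 1, 3]) and reverse accumulates from the
+ // end ([6, 5, 3]). indexAdjuster maps the logical position j to a physical
+ // index so one loop body covers both directions.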
+ cumsum(x, axis, exclusive, reverse) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'cumsum');
+ if (axis !== x.rank - 1) {
+ throw new Error(`backend.cumsum in CPU expects an inner-most axis=${x.rank - 1} ` +
+ `but got axis=${axis}`);
+ }
+ const resultDtype = Object(dist["upcastType"])(x.dtype, 'int32');
+ const result = dist["zeros"](x.shape, resultDtype);
+ const vals = this.readSync(result.dataId);
+ const aVals = this.readSync(x.dataId);
+ const finalDim = x.shape[x.rank - 1];
+ const indexAdjuster = reverse ?
+ (i, j) => i + finalDim - j - 1 :
+ (i, j) => i + j;
+ for (let i = 0; i < aVals.length; i += finalDim) {
+ for (let j = 0; j < finalDim; j++) {
+ const idx = indexAdjuster(i, j);
+ if (j === 0) {
+ vals[idx] = exclusive ? 0 : aVals[idx];
+ }
+ else {
+ const prevIdx = indexAdjuster(i, j - 1);
+ vals[idx] = exclusive ? aVals[prevIdx] + vals[prevIdx] :
+ aVals[idx] + vals[prevIdx];
+ }
+ }
+ }
+ return result;
+ }
+ equal(a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'equal');
+ return this.broadcastedBinaryOp(a, b, 'bool', (aVal, bVal) => {
+ return (aVal === bVal) ? 1 : 0;
+ });
+ }
+ notEqual(a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'notEqual');
+ return this.broadcastedBinaryOp(a, b, 'bool', (aVal, bVal) => {
+ return (aVal !== bVal) ? 1 : 0;
+ });
+ }
+ less(a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'less');
+ return this.broadcastedBinaryOp(a, b, 'bool', (aVal, bVal) => {
+ return (aVal < bVal) ? 1 : 0;
+ });
+ }
+ lessEqual(a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'lessEqual');
+ return this.broadcastedBinaryOp(a, b, 'bool', (aVal, bVal) => {
+ return (aVal <= bVal) ? 1 : 0;
+ });
+ }
+ greater(a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'greater');
+ return this.broadcastedBinaryOp(a, b, 'bool', (aVal, bVal) => {
+ return (aVal > bVal) ? 1 : 0;
+ });
+ }
+ greaterEqual(a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'greaterEqual');
+ return this.broadcastedBinaryOp(a, b, 'bool', (aVal, bVal) => {
+ return (aVal >= bVal) ? 1 : 0;
+ });
+ }
+ logicalNot(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'logicalNot');
+ const values = this.readSync(x.dataId);
+ const newValues = new Uint8Array(values.length);
+ for (let i = 0; i < values.length; ++i) {
+ newValues[i] = values[i] ? 0 : 1;
+ }
+ return this.makeOutput(newValues, x.shape, 'bool');
+ }
+ logicalAnd(a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'logicalAnd');
+ return this.broadcastedBinaryOp(a, b, 'bool', (aVal, bVal) => {
+ return aVal && bVal;
+ });
+ }
+ logicalOr(a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'logicalOr');
+ return this.broadcastedBinaryOp(a, b, 'bool', (aVal, bVal) => {
+ return aVal || bVal;
+ });
+ }
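+ // select picks from a or b per element of `condition`. When condition is
+ // rank 1 but a is higher rank, each condition value governs a whole "row"
+ // of a, so `offset` below is the size of a's inner dimensions: e.g.
+ // condition [1, 0] with a of shape [2, 3] copies a[0, :] then b[1, :].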
+ select(condition, a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([condition, a, b], 'select');
+ const values = this.readSync(condition.dataId);
+ const aValues = this.readSync(a.dataId);
+ const bValues = this.readSync(b.dataId);
+ const result = dist["zeros"](a.shape, Object(dist["upcastType"])(a.dtype, b.dtype));
+ const newValues = this.readSync(result.dataId);
+ let index = 0;
+ const offset = condition.rank === 0 || condition.rank > 1 || a.rank === 1 ?
+ 1 :
+ dist["util"].sizeFromShape(a.shape.slice(1));
+ for (let i = 0; i < values.length; i++) {
+ for (let j = 0; j < offset; j++) {
+ if (values[i] === 1) {
+ newValues[index] = aValues[index];
+ }
+ else {
+ newValues[index] = bValues[index];
+ }
+ index++;
+ }
+ }
+ return result;
+ }
+ where(condition) {
+ Object(cpu_util["a" /* assertNotComplex */])([condition], 'where');
+ const condVals = this.readSync(condition.dataId);
+ return whereImpl(condition.shape, condVals);
+ }
+ topk(x, k, sorted) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'topk');
+ const xVals = this.readSync(x.dataId);
+ return topkImpl(xVals, x.shape, x.dtype, k, sorted);
+ }
+ min(x, axes) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'min');
+ dist["backend_util"].assertAxesAreInnerMostDims('min', axes, x.rank);
+ const [outShape, reduceShape] = dist["backend_util"].computeOutAndReduceShapes(x.shape, axes);
+ const result = dist["zeros"](outShape, x.dtype);
+ const reduceSize = dist["util"].sizeFromShape(reduceShape);
+ const vals = this.readSync(result.dataId);
+ const aVals = this.readSync(x.dataId);
+ for (let i = 0; i < vals.length; ++i) {
+ const offset = i * reduceSize;
+ let min = aVals[offset];
+ for (let j = 0; j < reduceSize; ++j) {
+ const value = aVals[offset + j];
+ if (value < min) {
+ min = value;
+ }
+ }
+ vals[i] = min;
+ }
+ return result;
+ }
+ minimum(a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'minimum');
+ return this.broadcastedBinaryOp(a, b, a.dtype, (aVal, bVal) => Math.min(aVal, bVal));
+ }
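+ // Floored modulo, matching TensorFlow's tf.mod rather than JavaScript's
+ // remainder operator: the result takes the sign of the divisor, so
+ // mod(-5, 3) = 1 whereas -5 % 3 evaluates to -2 in JavaScript.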
+ mod(a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'mod');
+ return this.broadcastedBinaryOp(a, b, a.dtype, (aVal, bVal) => {
+ const rem = aVal % bVal;
+ if ((aVal < 0 && bVal < 0) || (aVal >= 0 && bVal >= 0)) {
+ return rem;
+ }
+ else {
+ return (rem + bVal) % bVal;
+ }
+ });
+ }
+ maximum(a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'maximum');
+ return this.broadcastedBinaryOp(a, b, a.dtype, (aVal, bVal) => Math.max(aVal, bVal));
+ }
+ all(x, axes) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'all');
+ dist["backend_util"].assertAxesAreInnerMostDims('all', axes, x.rank);
+ const [outShape, reduceShape] = dist["backend_util"].computeOutAndReduceShapes(x.shape, axes);
+ const result = dist["zeros"](outShape, x.dtype);
+ const reduceSize = dist["util"].sizeFromShape(reduceShape);
+ const vals = this.readSync(result.dataId);
+ const aVals = this.readSync(x.dataId);
+ for (let i = 0; i < vals.length; ++i) {
+ const offset = i * reduceSize;
+ let all = aVals[offset];
+ for (let j = 0; j < reduceSize; ++j) {
+ const value = aVals[offset + j];
+ all = all && value;
+ }
+ vals[i] = all;
+ }
+ return result;
+ }
+ any(x, axes) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'any');
+ dist["backend_util"].assertAxesAreInnerMostDims('any', axes, x.rank);
+ const [outShape, reduceShape] = dist["backend_util"].computeOutAndReduceShapes(x.shape, axes);
+ const result = dist["zeros"](outShape, x.dtype);
+ const reduceSize = dist["util"].sizeFromShape(reduceShape);
+ const vals = this.readSync(result.dataId);
+ const aVals = this.readSync(x.dataId);
+ for (let i = 0; i < vals.length; ++i) {
+ const offset = i * reduceSize;
+ let anyVal = aVals[offset];
+ for (let j = 0; j < reduceSize; ++j) {
+ const value = aVals[offset + j];
+ anyVal = anyVal || value;
+ }
+ vals[i] = anyVal;
+ }
+ return result;
+ }
+ squaredDifference(a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'squaredDifference');
+ return this.broadcastedBinaryOp(a, b, a.dtype, (aVal, bVal) => {
+ const diff = aVal - bVal;
+ return diff * diff;
+ });
+ }
+ ceil(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'ceil');
+ const values = this.readSync(x.dataId);
+ const newValues = new Float32Array(values.length);
+ for (let i = 0; i < values.length; ++i) {
+ newValues[i] = Math.ceil(values[i]);
+ }
+ return this.makeOutput(newValues, x.shape, 'float32');
+ }
+ floor(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'floor');
+ const values = this.readSync(x.dataId);
+ const newValues = new Float32Array(values.length);
+ for (let i = 0; i < values.length; ++i) {
+ newValues[i] = Math.floor(values[i]);
+ }
+ return this.makeOutput(newValues, x.shape, 'float32');
+ }
+ sign(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'x');
+ const values = this.readSync(x.dataId);
+ const newValues = new Float32Array(values.length);
+ for (let i = 0; i < values.length; ++i) {
+ if (values[i] < 0) {
+ newValues[i] = -1;
+ }
+ else if (values[i] > 0) {
+ newValues[i] = 1;
+ }
+ else {
+ newValues[i] = 0;
+ }
+ }
+ return this.makeOutput(newValues, x.shape, 'float32');
+ }
+ isNaN(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'x');
+ const values = this.readSync(x.dataId);
+ const newValues = new Uint8Array(values.length);
+ for (let i = 0; i < values.length; ++i) {
+ if (Number.isNaN(values[i])) {
+ newValues[i] = 1;
+ }
+ }
+ return this.makeOutput(newValues, x.shape, 'bool');
+ }
+ isInf(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'x');
+ const values = this.readSync(x.dataId);
+ const newValues = new Uint8Array(values.length);
+ for (let i = 0; i < values.length; ++i) {
+ if (Math.abs(values[i]) === Infinity) {
+ newValues[i] = 1;
+ }
+ }
+ return this.makeOutput(newValues, x.shape, 'bool');
+ }
+ isFinite(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'x');
+ const values = this.readSync(x.dataId);
+ const newValues = new Uint8Array(values.length);
+ for (let i = 0; i < values.length; ++i) {
+ if (Number.isFinite(values[i])) {
+ newValues[i] = 1;
+ }
+ }
+ return this.makeOutput(newValues, x.shape, 'bool');
+ }
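+ // Banker's rounding (round half to even), matching TensorFlow: ties go to
+ // the nearest even integer, so round(0.5) = 0, round(1.5) = 2 and
+ // round(2.5) = 2, which avoids the upward bias of always rounding .5 up.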
+ round(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'round');
+ const values = this.readSync(x.dataId);
+ const newValues = new Float32Array(values.length);
+ for (let i = 0; i < values.length; ++i) {
+ // The algorithm is based on banker's rounding.
+ const base = Math.floor(values[i]);
+ if (values[i] - base < 0.5) {
+ newValues[i] = Math.floor(values[i]);
+ }
+ else if (values[i] - base > 0.5) {
+ newValues[i] = Math.ceil(values[i]);
+ }
+ else {
+ if (base % 2.0 === 0.0) {
+ newValues[i] = base;
+ }
+ else {
+ newValues[i] = base + 1.0;
+ }
+ }
+ }
+ return this.makeOutput(newValues, x.shape, 'float32');
+ }
+ exp(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'exp');
+ const values = this.readSync(x.dataId);
+ const newValues = new Float32Array(values.length);
+ for (let i = 0; i < values.length; ++i) {
+ newValues[i] = Math.exp(values[i]);
+ }
+ return this.makeOutput(newValues, x.shape, 'float32');
+ }
+ expm1(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'expm1');
+ const values = this.readSync(x.dataId);
+ const newValues = new Float32Array(values.length);
+ for (let i = 0; i < values.length; ++i) {
+ newValues[i] = Math.expm1(values[i]);
+ }
+ return this.makeOutput(newValues, x.shape, 'float32');
+ }
+ log(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'log');
+ const values = this.readSync(x.dataId);
+ const newValues = new Float32Array(values.length);
+ for (let i = 0; i < values.length; ++i) {
+ const value = values[i];
+ newValues[i] = Math.log(value);
+ }
+ return this.makeOutput(newValues, x.shape, 'float32');
+ }
+ log1p(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'log1p');
+ const values = this.readSync(x.dataId);
+ const newValues = new Float32Array(values.length);
+ for (let i = 0; i < values.length; ++i) {
+ const value = values[i];
+ newValues[i] = Math.log1p(value);
+ }
+ return this.makeOutput(newValues, x.shape, 'float32');
+ }
+ sqrt(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'sqrt');
+ const values = this.readSync(x.dataId);
+ const newValues = new Float32Array(values.length);
+ for (let i = 0; i < values.length; ++i) {
+ const value = values[i];
+ newValues[i] = Math.sqrt(value);
+ }
+ return this.makeOutput(newValues, x.shape, 'float32');
+ }
+ rsqrt(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'rsqrt');
+ const values = this.readSync(x.dataId);
+ const newValues = new Float32Array(values.length);
+ for (let i = 0; i < values.length; ++i) {
+ const value = values[i];
+ newValues[i] = 1 / Math.sqrt(value);
+ }
+ return this.makeOutput(newValues, x.shape, 'float32');
+ }
+ reciprocal(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'reciprocal');
+ const values = this.readSync(x.dataId);
+ const newValues = new Float32Array(values.length);
+ for (let i = 0; i < values.length; ++i) {
+ newValues[i] = 1 / values[i];
+ }
+ return this.makeOutput(newValues, x.shape, 'float32');
+ }
+ linear(x) {
+ return x;
+ }
+ relu(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'relu');
+ const res = dist["zeros"](x.shape, x.dtype);
+ const resVals = this.readSync(res.dataId);
+ const inVals = this.readSync(x.dataId);
+ for (let i = 0; i < inVals.length; ++i) {
+ resVals[i] = Math.max(0, inVals[i]);
+ }
+ return res;
+ }
+ relu6(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'relu');
+ const res = dist["zeros"](x.shape, x.dtype);
+ const resVals = this.readSync(res.dataId);
+ const inVals = this.readSync(x.dataId);
+ for (let i = 0; i < inVals.length; ++i) {
+ resVals[i] = Math.min(Math.max(0, inVals[i]), 6);
+ }
+ return res;
+ }
+ prelu(x, a) {
+ Object(cpu_util["a" /* assertNotComplex */])([x, a], 'prelu');
+ return this.broadcastedBinaryOp(x, a, x.dtype, (xValue, aValue) => xValue < 0 ? aValue * xValue : xValue);
+ }
+ elu(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'elu');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ const v = values[i];
+ if (v >= 0) {
+ resultValues[i] = v;
+ }
+ else {
+ resultValues[i] = (Math.exp(v) - 1);
+ }
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ eluDer(dy, y) {
+ Object(cpu_util["a" /* assertNotComplex */])([dy, y], 'eluDer');
+ const resultValues = new Float32Array(y.size);
+ const values = this.readSync(y.dataId);
+ const dyValues = this.readSync(dy.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ const v = values[i];
+ if (v >= 1) {
+ resultValues[i] = dyValues[i];
+ }
+ else {
+ resultValues[i] = dyValues[i] * (v + 1);
+ }
+ }
+ return this.makeOutput(resultValues, y.shape, 'float32');
+ }
+ selu(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'selu');
+ // Stable and Attracting Fixed Point (0, 1) for Normalized Weights.
+ // see: https://arxiv.org/abs/1706.02515
+ const scaleAlpha = dist["backend_util"].SELU_SCALEALPHA;
+ const scale = dist["backend_util"].SELU_SCALE;
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ const v = values[i];
+ if (v >= 0) {
+ resultValues[i] = scale * v;
+ }
+ else {
+ resultValues[i] = scaleAlpha * (Math.exp(v) - 1);
+ }
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ clip(x, min, max) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'clip');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ const v = values[i];
+ resultValues[i] = v > max ? max : (v < min ? min : v);
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ abs(x) {
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ resultValues[i] = Math.abs(values[i]);
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ complexAbs(x) {
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < x.size; ++i) {
+ const real = values[i * 2];
+ const imag = values[i * 2 + 1];
+ resultValues[i] = Math.hypot(real, imag);
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ int(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'int');
+ const resultValues = new Int32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ resultValues[i] = values[i];
+ }
+ return this.makeOutput(resultValues, x.shape, 'int32');
+ }
+ sigmoid(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'sigmoid');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ resultValues[i] = 1 / (1 + Math.exp(-values[i]));
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ softplus(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'softplus');
+ // mirrors the implementation of tf.nn.softplus: https://goo.gl/vkcvwX
+ // epsilon is the difference between 1.0 and the next representable float.
+ // For a single precision 32 bit float this should be 2^-23, see:
+ // https://math.byu.edu/~schow/work/IEEEFloatingPoint.htm
+ const epsilon = 1.1920928955078125e-7;
+ const threshold = Math.log(epsilon) + 2.0;
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ // Value above which exp(x) may overflow, but softplus(x) == x
+ // is within machine epsilon.
+ const tooLarge = values[i] > -threshold;
+ // Value below which exp(x) may underflow, but softplus(x) == exp(x)
+ // is within machine epsilon.
+ const tooSmall = values[i] < threshold;
+ const expX = Math.exp(values[i]);
+ let result;
+ if (tooSmall) {
+ result = expX;
+ }
+ else if (tooLarge) {
+ result = values[i];
+ }
+ else {
+ result = Math.log(1.0 + expX);
+ }
+ resultValues[i] = result;
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ sin(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'sin');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ resultValues[i] = Math.sin(values[i]);
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ cos(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'cos');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ resultValues[i] = Math.cos(values[i]);
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ tan(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'tan');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ resultValues[i] = Math.tan(values[i]);
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ asin(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'asin');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ resultValues[i] = Math.asin(values[i]);
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ acos(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'acos');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ resultValues[i] = Math.acos(values[i]);
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ atan(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'atan');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ resultValues[i] = Math.atan(values[i]);
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ atan2(a, b) {
+ Object(cpu_util["a" /* assertNotComplex */])([a, b], 'atan2');
+ return this.broadcastedBinaryOp(a, b, a.dtype, (aValue, bValue) => Math.atan2(aValue, bValue));
+ }
+ sinh(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'sinh');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ resultValues[i] = Math.sinh(values[i]);
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ cosh(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'cosh');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ resultValues[i] = Math.cosh(values[i]);
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ tanh(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'tanh');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ resultValues[i] = dist["util"].tanh(values[i]);
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ asinh(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'asinh');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ resultValues[i] = Math.asinh(values[i]);
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ acosh(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'acosh');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ resultValues[i] = Math.acosh(values[i]);
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ atanh(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'atanh');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ resultValues[i] = Math.atanh(values[i]);
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
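+ // erf is evaluated with the classic Abramowitz & Stegun 7.1.26 polynomial
+ // approximation: erf(v) ~= 1 - (a1*t + a2*t^2 + ... + a5*t^5) * exp(-v*v)
+ // with t = 1 / (1 + p*v), applied to |v| and re-signed. Its maximum
+ // absolute error is about 1.5e-7, on the order of float32 machine epsilon.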
+ erf(x) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'erf');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ const p = dist["backend_util"].ERF_P;
+ const a1 = dist["backend_util"].ERF_A1;
+ const a2 = dist["backend_util"].ERF_A2;
+ const a3 = dist["backend_util"].ERF_A3;
+ const a4 = dist["backend_util"].ERF_A4;
+ const a5 = dist["backend_util"].ERF_A5;
+ for (let i = 0; i < values.length; ++i) {
+ const sign = Math.sign(values[i]);
+ const v = Math.abs(values[i]);
+ const t = 1.0 / (1.0 + p * v);
+ resultValues[i] = sign *
+ (1.0 -
+ (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t *
+ Math.exp(-v * v));
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ step(x, alpha = 0) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'step');
+ const resultValues = new Float32Array(x.size);
+ const values = this.readSync(x.dataId);
+ for (let i = 0; i < values.length; ++i) {
+ const value = values[i];
+ if (isNaN(value)) {
+ resultValues[i] = NaN;
+ }
+ else {
+ resultValues[i] = value > 0 ? 1 : alpha;
+ }
+ }
+ return this.makeOutput(resultValues, x.shape, 'float32');
+ }
+ fusedConv2d({ input, filter, convInfo, bias, activation, preluActivationWeights }) {
+ let result = this.conv2d(input, filter, convInfo);
+ if (bias) {
+ result = this.add(result, bias);
+ }
+ if (activation) {
+ result =
+ mapActivation(this, result, activation, preluActivationWeights);
+ }
+ return result;
+ }
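+ // Direct (im2col-free) 2D convolution: for every batch and output row, the
+ // filter rows that fall inside the input are walked, and each input value
+ // contributes to all outChannels outputs at once via the innermost d1/d2
+ // loops. Out-of-bounds taps from padding are simply skipped with
+ // `continue`, which is equivalent to zero-padding the input.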
+ conv2d(x, filter, convInfo) {
+ Object(cpu_util["a" /* assertNotComplex */])([x, filter], 'conv2d');
+ const filterHeight = convInfo.filterHeight;
+ const filterWidth = convInfo.filterWidth;
+ const dilationHeight = convInfo.dilationHeight;
+ const dilationWidth = convInfo.dilationWidth;
+ const padLeft = convInfo.padInfo.left;
+ const padTop = convInfo.padInfo.top;
+ const isChannelsLast = convInfo.dataFormat === 'channelsLast';
+ const y = dist["buffer"](convInfo.outShape, x.dtype);
+ const xBatchStride = x.strides[0];
+ const xRowStride = isChannelsLast ? x.strides[1] : x.strides[2];
+ const xColStride = isChannelsLast ? x.strides[2] : 1;
+ const xChannelStride = isChannelsLast ? 1 : x.strides[1];
+ const yBatchStride = y.strides[0];
+ const yRowStride = isChannelsLast ? y.strides[1] : y.strides[2];
+ const yColStride = isChannelsLast ? y.strides[2] : 1;
+ const yChannelStride = isChannelsLast ? 1 : y.strides[1];
+ const xVals = this.readSync(x.dataId);
+ const wVals = this.readSync(filter.dataId);
+ const yVals = y.values;
+ for (let b = 0; b < convInfo.batchSize; ++b) {
+ const xOffset1 = b * xBatchStride;
+ const yOffset1 = b * yBatchStride;
+ for (let yR = 0; yR < convInfo.outHeight; ++yR) {
+ const yOffset2 = yOffset1 + yR * yRowStride;
+ const xRCorner = yR * convInfo.strideHeight - padTop;
+ for (let wR = 0; wR < filterHeight; wR++) {
+ const xR = xRCorner + wR * dilationHeight;
+ if (xR < 0 || xR >= convInfo.inHeight) {
+ continue;
+ }
+ const wOffset1 = wR * filter.strides[0];
+ const xOffset2 = xOffset1 + xR * xRowStride;
+ for (let yC = 0; yC < convInfo.outWidth; ++yC) {
+ const yOffset3 = yOffset2 + yC * yColStride;
+ const xCCorner = yC * convInfo.strideWidth - padLeft;
+ for (let wC = 0; wC < filterWidth; wC++) {
+ const xC = xCCorner + wC * dilationWidth;
+ if (xC < 0 || xC >= convInfo.inWidth) {
+ continue;
+ }
+ const wOffset2 = wOffset1 + wC * filter.strides[1];
+ const xOffset3 = xOffset2 + xC * xColStride;
+ let wOffset3 = wOffset2;
+ for (let d1 = 0; d1 < convInfo.inChannels; ++d1) {
+ const xVal = xVals[xOffset3 + d1 * xChannelStride];
+ for (let d2 = 0; d2 < convInfo.outChannels; ++d2) {
+ yVals[yOffset3 + d2 * yChannelStride] +=
+ xVal * wVals[wOffset3 + d2];
+ }
+ wOffset3 += convInfo.outChannels;
+ }
+ }
+ }
+ }
+ }
+ }
+ return y.toTensor();
+ }
+ conv3d(x, filter, convInfo) {
+ const filterDepth = convInfo.filterDepth;
+ const filterHeight = convInfo.filterHeight;
+ const filterWidth = convInfo.filterWidth;
+ const dilationDepth = convInfo.dilationDepth;
+ const dilationHeight = convInfo.dilationHeight;
+ const dilationWidth = convInfo.dilationWidth;
+ const padFront = convInfo.padInfo.front;
+ const padLeft = convInfo.padInfo.left;
+ const padTop = convInfo.padInfo.top;
+ const y = dist["buffer"](convInfo.outShape, x.dtype);
+ const xVals = this.readSync(x.dataId);
+ const wVals = this.readSync(filter.dataId);
+ const yVals = y.values;
+ for (let b = 0; b < convInfo.batchSize; ++b) {
+ const xOffset1 = b * x.strides[0];
+ const yOffset1 = b * y.strides[0];
+ for (let yF = 0; yF < convInfo.outDepth; ++yF) {
+ const yOffset2 = yOffset1 + yF * y.strides[1];
+ const xFCorner = yF * convInfo.strideDepth - padFront;
+ for (let wF = 0; wF < filterDepth; wF++) {
+ const xF = xFCorner + wF * dilationDepth;
+ if (xF < 0 || xF >= convInfo.inDepth) {
+ continue;
+ }
+ const wOffset1 = wF * filter.strides[0];
+ const xOffset2 = xOffset1 + xF * x.strides[1];
+ for (let yR = 0; yR < convInfo.outHeight; ++yR) {
+ const yOffset3 = yOffset2 + yR * y.strides[2];
+ const xRCorner = yR * convInfo.strideHeight - padTop;
+ for (let wR = 0; wR < filterHeight; wR++) {
+ const xR = xRCorner + wR * dilationHeight;
+ if (xR < 0 || xR >= convInfo.inHeight) {
+ continue;
+ }
+ const wOffset2 = wOffset1 + wR * filter.strides[1];
+ const xOffset3 = xOffset2 + xR * x.strides[2];
+ for (let yC = 0; yC < convInfo.outWidth; ++yC) {
+ const yOffset4 = yOffset3 + yC * convInfo.outChannels;
+ const xCCorner = yC * convInfo.strideWidth - padLeft;
+ for (let wC = 0; wC < filterWidth; wC++) {
+ const xC = xCCorner + wC * dilationWidth;
+ if (xC < 0 || xC >= convInfo.inWidth) {
+ continue;
+ }
+ const wOffset3 = wOffset2 + wC * filter.strides[2];
+ const xOffset4 = xOffset3 + xC * convInfo.inChannels;
+ let wOffset4 = wOffset3;
+ for (let d1 = 0; d1 < convInfo.inChannels; ++d1) {
+ const xVal = xVals[xOffset4 + d1];
+ for (let d2 = 0; d2 < convInfo.outChannels; ++d2) {
+ yVals[yOffset4 + d2] += xVal * wVals[wOffset4 + d2];
+ }
+ wOffset4 += convInfo.outChannels;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return y.toTensor();
+ }
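+ // Gradient of conv2d w.r.t. its input: mathematically a "full" correlation
+ // of dy with the filter rotated 180 degrees, which is why the filter is
+ // indexed as (filterHeight - 1 - wR, filterWidth - 1 - wC) and the padding
+ // is mirrored (topPad = filterHeight - 1 - padInfo.top, etc.).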
+ conv2dDerInput(dy, filter, convInfo) {
+ Object(cpu_util["a" /* assertNotComplex */])([dy, filter], 'conv2dDerInput');
+ const dx = dist["buffer"](convInfo.inShape, 'float32');
+ const dxValues = dx.values;
+ const dyValues = this.readSync(dy.dataId);
+ const fltValues = this.readSync(filter.dataId);
+ const [fltS0, fltS1, fltS2] = filter.strides;
+ const { batchSize, filterHeight, filterWidth, inChannels, inHeight, inWidth, outChannels, outHeight, outWidth, strideHeight, strideWidth, dataFormat } = convInfo;
+ const topPad = filterHeight - 1 - convInfo.padInfo.top;
+ const leftPad = filterWidth - 1 - convInfo.padInfo.left;
+ const isChannelsLast = dataFormat === 'channelsLast';
+ const xBatchStride = dx.strides[0];
+ const xRowStride = isChannelsLast ? dx.strides[1] : dx.strides[2];
+ const xColStride = isChannelsLast ? dx.strides[2] : 1;
+ const xChannelStride = isChannelsLast ? 1 : dx.strides[1];
+ const yBatchStride = dy.strides[0];
+ const yRowStride = isChannelsLast ? dy.strides[1] : dy.strides[2];
+ const yColStride = isChannelsLast ? dy.strides[2] : 1;
+ const yChannelStride = isChannelsLast ? 1 : dy.strides[1];
+ for (let b = 0; b < batchSize; ++b) {
+ for (let d1 = 0; d1 < inChannels; ++d1) {
+ for (let xR = 0; xR < inHeight; ++xR) {
+ const xRCorner = xR - topPad;
+ const xRMin = Math.max(0, Math.ceil(xRCorner / strideHeight));
+ const yRMax = Math.min(outHeight, (filterHeight + xRCorner) / strideHeight);
+ for (let xC = 0; xC < inWidth; ++xC) {
+ const xCCorner = xC - leftPad;
+ const xCMin = Math.max(0, Math.ceil(xCCorner / strideWidth));
+ const yCMax = Math.min(outWidth, (filterWidth + xCCorner) / strideWidth);
+ let dotProd = 0;
+ for (let yR = xRMin; yR < yRMax; ++yR) {
+ const wR = yR * strideHeight - xRCorner;
+ for (let yC = xCMin; yC < yCMax; ++yC) {
+ const wC = yC * strideWidth - xCCorner;
+ const dyOffset = yBatchStride * b + yRowStride * yR + yColStride * yC;
+ const fltOffset = fltS0 * (filterHeight - 1 - wR) +
+ fltS1 * (filterWidth - 1 - wC) + fltS2 * d1;
+ for (let d2 = 0; d2 < outChannels; ++d2) {
+ const pixel = dyValues[dyOffset + yChannelStride * d2];
+ const weight = fltValues[fltOffset + d2];
+ dotProd += pixel * weight;
+ }
+ }
+ }
+ const dxOffset = xBatchStride * b + xRowStride * xR +
+ xColStride * xC + xChannelStride * d1;
+ dxValues[dxOffset] = dotProd;
+ }
+ }
+ }
+ }
+ return dx.toTensor();
+ }
+ conv3dDerInput(dy, filter, convInfo) {
+ const dx = dist["buffer"](convInfo.inShape, 'float32');
+ const dxValues = dx.values;
+ const [dxS0, dxS1, dxS2, dxS3] = dx.strides;
+ const dyValues = this.readSync(dy.dataId);
+ const [dyS0, dyS1, dyS2, dyS3] = dy.strides;
+ const fltValues = this.readSync(filter.dataId);
+ const [fltS0, fltS1, fltS2, fltS3] = filter.strides;
+ const { batchSize, filterDepth, filterHeight, filterWidth, inChannels, inDepth, inHeight, inWidth, outChannels, outDepth, outHeight, outWidth, strideDepth, strideHeight, strideWidth } = convInfo;
+ const frontPad = filterDepth - 1 - convInfo.padInfo.front;
+ const topPad = filterHeight - 1 - convInfo.padInfo.top;
+ const leftPad = filterWidth - 1 - convInfo.padInfo.left;
+ for (let b = 0; b < batchSize; ++b) {
+ for (let d1 = 0; d1 < inChannels; ++d1) {
+ // Frames of depth
+ for (let xF = 0; xF < inDepth; ++xF) {
+ const xFCorner = xF - frontPad;
+ const xFMin = Math.max(0, Math.ceil(xFCorner / strideDepth));
+ const yFMax = Math.min(outDepth, (filterDepth + xFCorner) / strideDepth);
+ // Rows as per standard 2d matrix notation
+ for (let xR = 0; xR < inHeight; ++xR) {
+ const xRCorner = xR - topPad;
+ const xRMin = Math.max(0, Math.ceil(xRCorner / strideHeight));
+ const yRMax = Math.min(outHeight, (filterHeight + xRCorner) / strideHeight);
+ // Columns as per standard 2d matrix notation
+ for (let xC = 0; xC < inWidth; ++xC) {
+ const xCCorner = xC - leftPad;
+ const xCMin = Math.max(0, Math.ceil(xCCorner / strideWidth));
+ const yCMax = Math.min(outWidth, (filterWidth + xCCorner) / strideWidth);
+ let dotProd = 0;
+ for (let yF = xFMin; yF < yFMax; ++yF) {
+ const wF = yF * strideDepth - xFCorner;
+ for (let yR = xRMin; yR < yRMax; ++yR) {
+ const wR = yR * strideHeight - xRCorner;
+ for (let yC = xCMin; yC < yCMax; ++yC) {
+ const wC = yC * strideWidth - xCCorner;
+ const dyOffset = dyS0 * b + dyS1 * yF + dyS2 * yR + dyS3 * yC;
+ const fltOffset = fltS0 * (filterDepth - 1 - wF) +
+ fltS1 * (filterHeight - 1 - wR) +
+ fltS2 * (filterWidth - 1 - wC) + fltS3 * d1;
+ for (let d2 = 0; d2 < outChannels; ++d2) {
+ const pixel = dyValues[dyOffset + d2];
+ const weight = fltValues[fltOffset + d2];
+ dotProd += pixel * weight;
+ }
+ }
+ }
+ }
+ dxValues[dxS0 * b + dxS1 * xF + dxS2 * xR + dxS3 * xC + d1] =
+ dotProd;
+ }
+ }
+ }
+ }
+ }
+ return dx.toTensor();
+ }
+ conv2dDerFilter(x, dy, convInfo) {
+ Object(cpu_util["a" /* assertNotComplex */])([x, dy], 'conv2dDerFilter');
+ const strideHeight = convInfo.strideHeight;
+ const strideWidth = convInfo.strideWidth;
+ const filterHeight = convInfo.filterHeight;
+ const filterWidth = convInfo.filterWidth;
+ const isChannelsLast = convInfo.dataFormat === 'channelsLast';
+ const dW = dist["buffer"](convInfo.filterShape, 'float32');
+ const leftPad = convInfo.padInfo.left;
+ const topPad = convInfo.padInfo.top;
+ const xBuf = this.bufferSync(x);
+ const dyBuf = this.bufferSync(dy);
+ for (let wR = 0; wR < filterHeight; ++wR) {
+ const yRMin = Math.max(0, Math.ceil((topPad - wR) / strideHeight));
+ const yRMax = Math.min(convInfo.outHeight, (convInfo.inHeight + topPad - wR) / strideHeight);
+ for (let wC = 0; wC < filterWidth; ++wC) {
+ const yCMin = Math.max(0, Math.ceil((leftPad - wC) / strideWidth));
+ const yCMax = Math.min(convInfo.outWidth, (convInfo.inWidth + leftPad - wC) / strideWidth);
+ for (let d1 = 0; d1 < convInfo.inChannels; ++d1) {
+ for (let d2 = 0; d2 < convInfo.outChannels; ++d2) {
+ // Need to convolve.
+ let dotProd = 0;
+ for (let b = 0; b < convInfo.batchSize; ++b) {
+ for (let yR = yRMin; yR < yRMax; ++yR) {
+ const xR = wR + yR * strideHeight - topPad;
+ for (let yC = yCMin; yC < yCMax; ++yC) {
+ const xC = wC + yC * strideWidth - leftPad;
+ if (isChannelsLast) {
+ dotProd +=
+ xBuf.get(b, xR, xC, d1) * dyBuf.get(b, yR, yC, d2);
+ }
+ else {
+ dotProd +=
+ xBuf.get(b, d1, xR, xC) * dyBuf.get(b, d2, yR, yC);
+ }
+ }
+ }
+ }
+ dW.set(dotProd, wR, wC, d1, d2);
+ }
+ }
+ }
+ }
+ return dW.toTensor();
+ }
+ conv3dDerFilter(x, dy, convInfo) {
+ const strideDepth = convInfo.strideDepth;
+ const strideHeight = convInfo.strideHeight;
+ const strideWidth = convInfo.strideWidth;
+ const filterDepth = convInfo.filterDepth;
+ const filterHeight = convInfo.filterHeight;
+ const filterWidth = convInfo.filterWidth;
+ const dw = dist["buffer"](convInfo.filterShape, 'float32');
+ const dwValues = dw.values;
+ const [dwS0, dwS1, dwS2, dwS3] = dw.strides;
+ const dyValues = this.readSync(dy.dataId);
+ const [dyS0, dyS1, dyS2, dyS3] = dy.strides;
+ const xValues = this.readSync(x.dataId);
+ const [xS0, xS1, xS2, xS3] = x.strides;
+ const frontPad = convInfo.padInfo.front;
+ const leftPad = convInfo.padInfo.left;
+ const topPad = convInfo.padInfo.top;
+ for (let wF = 0; wF < filterDepth; ++wF) {
+ const yFMin = Math.max(0, Math.ceil((frontPad - wF) / strideDepth));
+ const yFMax = Math.min(convInfo.outDepth, (convInfo.inDepth + frontPad - wF) / strideDepth);
+ const wOffset1 = wF * dwS0;
+ for (let wR = 0; wR < filterHeight; ++wR) {
+ const yRMin = Math.max(0, Math.ceil((topPad - wR) / strideHeight));
+ const yRMax = Math.min(convInfo.outHeight, (convInfo.inHeight + topPad - wR) / strideHeight);
+ const wOffset2 = wR * dwS1 + wOffset1;
+ for (let wC = 0; wC < filterWidth; ++wC) {
+ const yCMin = Math.max(0, Math.ceil((leftPad - wC) / strideWidth));
+ const yCMax = Math.min(convInfo.outWidth, (convInfo.inWidth + leftPad - wC) / strideWidth);
+ const wOffset3 = wC * dwS2 + wOffset2;
+ for (let d1 = 0; d1 < convInfo.inChannels; ++d1) {
+ const wOffset4 = d1 * dwS3 + wOffset3;
+ for (let d2 = 0; d2 < convInfo.outChannels; ++d2) {
+ let dotProd = 0;
+ for (let b = 0; b < convInfo.batchSize; ++b) {
+ const xOffset1 = b * xS0;
+ const yOffset1 = b * dyS0;
+ for (let yF = yFMin; yF < yFMax; ++yF) {
+ const xF = wF + yF * strideDepth - frontPad;
+ const xOffset2 = xF * xS1 + xOffset1;
+ const yOffset2 = yF * dyS1 + yOffset1;
+ for (let yR = yRMin; yR < yRMax; ++yR) {
+ const xR = wR + yR * strideHeight - topPad;
+ const xOffset3 = xR * xS2 + xOffset2;
+ const yOffset3 = yR * dyS2 + yOffset2;
+ for (let yC = yCMin; yC < yCMax; ++yC) {
+ const xC = wC + yC * strideWidth - leftPad;
+ const xOffset4 = xC * xS3 + xOffset3;
+ const yOffset4 = yC * dyS3 + yOffset3;
+ dotProd +=
+ xValues[xOffset4 + d1] * dyValues[yOffset4 + d2];
+ }
+ }
+ }
+ }
+ dwValues[wOffset4 + d2] = dotProd;
+ }
+ }
+ }
+ }
+ }
+ return dw.toTensor();
+ }
+ fusedDepthwiseConv2D({ input, filter, convInfo, bias, activation, preluActivationWeights }) {
+ let result = this.depthwiseConv2D(input, filter, convInfo);
+ if (bias) {
+ result = this.add(result, bias);
+ }
+ if (activation) {
+ result =
+ mapActivation(this, result, activation, preluActivationWeights);
+ }
+ return result;
+ }
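+ // Depthwise convolution: every input channel d1 is convolved with its own
+ // chMul = outChannels / inChannels filters, producing output channels
+ // d1 * chMul + q. E.g. with inChannels = 3 and chMul = 2 the output has 6
+ // channels and no cross-channel mixing takes place.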
+ depthwiseConv2D(x, filter, convInfo) {
+ Object(cpu_util["a" /* assertNotComplex */])([x, filter], 'depthwiseConv2D');
+ const filterHeight = convInfo.filterHeight;
+ const filterWidth = convInfo.filterWidth;
+ const dilationHeight = convInfo.dilationHeight;
+ const dilationWidth = convInfo.dilationWidth;
+ const padLeft = convInfo.padInfo.left;
+ const padTop = convInfo.padInfo.top;
+ const chMul = convInfo.outChannels / convInfo.inChannels;
+ const y = dist["buffer"](convInfo.outShape, x.dtype);
+ const xVals = this.readSync(x.dataId);
+ const wVals = this.readSync(filter.dataId);
+ const yVals = y.values;
+ for (let b = 0; b < convInfo.batchSize; ++b) {
+ const xOffset1 = b * x.strides[0];
+ const yOffset1 = b * y.strides[0];
+ for (let yR = 0; yR < convInfo.outHeight; ++yR) {
+ const yOffset2 = yOffset1 + yR * y.strides[1];
+ const xRCorner = yR * convInfo.strideHeight - padTop;
+ for (let wR = 0; wR < filterHeight; ++wR) {
+ const xR = xRCorner + wR * dilationHeight;
+ if (xR < 0 || xR >= convInfo.inHeight) {
+ continue;
+ }
+ const wOffset1 = wR * filter.strides[0];
+ const xOffset2 = xOffset1 + xR * x.strides[1];
+ for (let yC = 0; yC < convInfo.outWidth; ++yC) {
+ const yOffset3 = yOffset2 + yC * y.strides[2];
+ const xCCorner = yC * convInfo.strideWidth - padLeft;
+ for (let wC = 0; wC < filterWidth; ++wC) {
+ const xC = xCCorner + wC * dilationWidth;
+ if (xC < 0 || xC >= convInfo.inWidth) {
+ continue;
+ }
+ const wOffset2 = wOffset1 + wC * filter.strides[1];
+ const xOffset3 = xOffset2 + xC * convInfo.inChannels;
+ let yOffset4 = yOffset3;
+ let wOffset3 = wOffset2;
+ for (let d1 = 0; d1 < convInfo.inChannels; ++d1) {
+ const xVal = xVals[xOffset3 + d1];
+ for (let q = 0; q < chMul; ++q) {
+ yVals[yOffset4 + q] += xVal * wVals[wOffset3 + q];
+ }
+ yOffset4 += chMul;
+ wOffset3 += chMul;
+ }
+ }
+ }
+ }
+ }
+ }
+ return y.toTensor();
+ }
+ depthwiseConv2DDerInput(dy, filter, convInfo) {
+ Object(cpu_util["a" /* assertNotComplex */])([dy, filter], 'depthwiseConv2DDerInput');
+ const dx = dist["buffer"](convInfo.inShape, 'float32');
+ const dxValues = dx.values;
+ const [dxS0, dxS1, dxS2] = dx.strides;
+ const dyValues = this.readSync(dy.dataId);
+ const [dyS0, dyS1, dyS2] = dy.strides;
+ const fltValues = this.readSync(filter.dataId);
+ const [fltS0, fltS1, fltS2] = filter.strides;
+ const { batchSize, filterHeight, filterWidth, inChannels, inHeight, inWidth, outChannels, outHeight, outWidth, strideHeight, strideWidth } = convInfo;
+ const topPad = filterHeight - 1 - convInfo.padInfo.top;
+ const leftPad = filterWidth - 1 - convInfo.padInfo.left;
+ const chMul = outChannels / inChannels;
+ for (let b = 0; b < batchSize; ++b) {
+ for (let d1 = 0; d1 < inChannels; ++d1) {
+ for (let xR = 0; xR < inHeight; ++xR) {
+ const xRCorner = xR - topPad;
+ const xRMin = Math.max(0, Math.ceil(xRCorner / strideHeight));
+ const yRMax = Math.min(outHeight, (filterHeight + xRCorner) / strideHeight);
+ for (let xC = 0; xC < inWidth; ++xC) {
+ const xCCorner = xC - leftPad;
+ const xCMin = Math.max(0, Math.ceil(xCCorner / strideWidth));
+ const yCMax = Math.min(outWidth, (filterWidth + xCCorner) / strideWidth);
+ let dotProd = 0;
+ for (let yR = xRMin; yR < yRMax; ++yR) {
+ const wR = yR * strideHeight - xRCorner;
+ for (let yC = xCMin; yC < yCMax; ++yC) {
+ const wC = yC * strideWidth - xCCorner;
+ const dyOffset = dyS0 * b + dyS1 * yR + dyS2 * yC;
+ const fltOffset = fltS0 * (filterHeight - 1 - wR) +
+ fltS1 * (filterWidth - 1 - wC) + fltS2 * d1;
+ for (let dm = 0; dm < chMul; ++dm) {
+ const d2 = d1 * chMul + dm;
+ const pixel = dyValues[dyOffset + d2];
+ const weight = fltValues[fltOffset + dm];
+ dotProd += pixel * weight;
+ }
+ }
+ }
+ dxValues[dxS0 * b + dxS1 * xR + dxS2 * xC + d1] = dotProd;
+ }
+ }
+ }
+ }
+ return dx.toTensor();
+ }
+ depthwiseConv2DDerFilter(x, dy, convInfo) {
+ Object(cpu_util["a" /* assertNotComplex */])([x, dy], 'depthwiseConv2DDerFilter');
+ const strideHeight = convInfo.strideHeight;
+ const strideWidth = convInfo.strideWidth;
+ const filterHeight = convInfo.filterHeight;
+ const filterWidth = convInfo.filterWidth;
+ const dW = dist["buffer"](convInfo.filterShape, 'float32');
+ const leftPad = convInfo.padInfo.left;
+ const topPad = convInfo.padInfo.top;
+ const chMul = convInfo.outChannels / convInfo.inChannels;
+ const xBuf = this.bufferSync(x);
+ const dyBuf = this.bufferSync(dy);
+ for (let wR = 0; wR < filterHeight; ++wR) {
+ const yRMin = Math.max(0, Math.ceil((topPad - wR) / strideHeight));
+ const yRMax = Math.min(convInfo.outHeight, (convInfo.inHeight + topPad - wR) / strideHeight);
+ for (let wC = 0; wC < filterWidth; ++wC) {
+ const yCMin = Math.max(0, Math.ceil((leftPad - wC) / strideWidth));
+ const yCMax = Math.min(convInfo.outWidth, (convInfo.inWidth + leftPad - wC) / strideWidth);
+ for (let d2 = 0; d2 < convInfo.outChannels; ++d2) {
+ const d1 = Math.trunc(d2 / chMul);
+ const dm = d2 % chMul;
+ let dotProd = 0;
+ for (let b = 0; b < convInfo.batchSize; ++b) {
+ for (let yR = yRMin; yR < yRMax; ++yR) {
+ const xR = wR + yR * strideHeight - topPad;
+ for (let yC = yCMin; yC < yCMax; ++yC) {
+ const xC = wC + yC * strideWidth - leftPad;
+ dotProd += xBuf.get(b, xR, xC, d1) * dyBuf.get(b, yR, yC, d2);
+ }
+ }
+ }
+ dW.set(dotProd, wR, wC, d1, dm);
+ }
+ }
+ }
+ return dW.toTensor();
+ }
+ tile(x, reps) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'tile');
+ return tile(this.bufferSync(x), reps);
+ }
+ pad(x, paddings, constantValue) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'pad');
+ const outShape = paddings.map((p, i) => p[0] /* beforePad */ + x.shape[i] + p[1] /* afterPad */);
+ const start = paddings.map(p => p[0]);
+ const xBuffer = this.bufferSync(x);
+ const buffer = dist["buffer"](outShape, x.dtype);
+ if (constantValue !== 0) {
+ buffer.values.fill(constantValue);
+ }
+ for (let i = 0; i < x.size; i++) {
+ const coords = xBuffer.indexToLoc(i);
+ const outCoords = coords.map((c, i) => c + start[i]);
+ buffer.set(xBuffer.get(...coords), ...outCoords);
+ }
+ return buffer.toTensor();
+ }
+ gather(x, indices, axis) {
+ Object(cpu_util["a" /* assertNotComplex */])([x, indices], 'gather');
+ const newShape = x.shape.slice();
+ const indicesValues = this.readSync(indices.dataId);
+ newShape[axis] = indicesValues.length;
+ const result = dist["buffer"](newShape, x.dtype);
+ const xBuf = this.bufferSync(x);
+ for (let i = 0; i < result.size; ++i) {
+ const newLoc = result.indexToLoc(i);
+ const originalLoc = newLoc.slice();
+ originalLoc[axis] = indicesValues[newLoc[axis]];
+ const originalIndex = xBuf.locToIndex(originalLoc);
+ result.values[i] = xBuf.values[originalIndex];
+ }
+ return result.toTensor();
+ }
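+ // batchToSpaceND / spaceToBatchND are implemented as pure data-movement
+ // pipelines: reshape to expose the block dimensions, transpose them next
+ // to the spatial dimensions, reshape back, then slice off the crops (or
+ // pad before reshaping, for spaceToBatchND). No arithmetic kernel is
+ // needed.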
+ batchToSpaceND(x, blockShape, crops) {
+ Object(cpu_util["a" /* assertNotComplex */])([x], 'batchToSpaceND');
+ const prod = blockShape.reduce((a, b) => a * b);
+ const reshaped = dist["backend_util"].getReshaped(x.shape, blockShape, prod);
+ const permuted = dist["backend_util"].getPermuted(reshaped.length, blockShape.length);
+ const reshapedPermuted = dist["backend_util"].getReshapedPermuted(x.shape, blockShape, prod);
+ const sliceBeginCoords = dist["backend_util"].getSliceBeginCoords(crops, blockShape.length);
+ const sliceSize = dist["backend_util"].getSliceSize(reshapedPermuted, crops, blockShape.length);
+ return dist["transpose"](x.reshape(reshaped), permuted)
+ .reshape(reshapedPermuted)
+ .slice(sliceBeginCoords, sliceSize);
+ }
+ spaceToBatchND(x, blockShape, paddings) {
+ Object(cpu_util["a" /* assertNotComplex */])([x], 'spaceToBatchND');
+ const prod = blockShape.reduce((a, b) => a * b);
+ const completePaddings = [[0, 0]];
+ completePaddings.push(...paddings);
+ for (let i = 1 + blockShape.length; i < x.shape.length; ++i) {
+ completePaddings.push([0, 0]);
+ }
+ const paddedX = x.pad(completePaddings);
+ const reshapedPaddedShape = dist["backend_util"].getReshaped(paddedX.shape, blockShape, prod, false);
+ const permutedReshapedPaddedPermutation = dist["backend_util"].getPermuted(reshapedPaddedShape.length, blockShape.length, false);
+ const flattenShape = dist["backend_util"].getReshapedPermuted(paddedX.shape, blockShape, prod, false);
+ return dist["transpose"](paddedX.reshape(reshapedPaddedShape), permutedReshapedPaddedPermutation)
+ .reshape(flattenShape);
+ }
+ maxPool(x, convInfo) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'maxPool');
+ const xValues = this.readSync(x.dataId);
+ return Object(pool_utils["b" /* pool */])(xValues, x.shape, x.dtype, x.strides, convInfo, 'max')
+ .toTensor();
+ }
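+ // maxPool gradient: maxPoolPositions records, per output pixel, the
+ // flattened within-window offset of the winning max; the loops below route
+ // each dy value back to exactly that input position. The stored offset is
+ // decoded as (effectiveFilterHeight * effectiveFilterWidth - 1 - pos)
+ // because this loop walks the window from the input pixel's point of view,
+ // which reverses the filter offsets.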
+ maxPoolBackprop(dy, x, y, convInfo) {
+ Object(cpu_util["a" /* assertNotComplex */])([x, y], 'maxPoolBackprop');
+ const xValues = this.readSync(x.dataId);
+ const maxPosBuf = Object(dist["buffer"])(convInfo.outShape, x.dtype, Object(pool_utils["a" /* maxPoolPositions */])(xValues, x.shape, x.dtype, convInfo).values);
+ const strideHeight = convInfo.strideHeight;
+ const strideWidth = convInfo.strideWidth;
+ const dilationHeight = convInfo.dilationHeight;
+ const dilationWidth = convInfo.dilationWidth;
+ const effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ const effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;
+ const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;
+ const dx = dist["buffer"](x.shape, 'float32');
+ const dyBuf = this.bufferSync(dy);
+ for (let b = 0; b < convInfo.batchSize; ++b) {
+ for (let d = 0; d < convInfo.inChannels; ++d) {
+ for (let dxR = 0; dxR < convInfo.inHeight; ++dxR) {
+ for (let dxC = 0; dxC < convInfo.inWidth; ++dxC) {
+ // Shader code begins.
+ const dyRCorner = dxR - padTop;
+ const dyCCorner = dxC - padLeft;
+ let dotProd = 0;
+ for (let wR = 0; wR < effectiveFilterHeight; wR += dilationHeight) {
+ const dyR = (dyRCorner + wR) / strideHeight;
+ if (dyR < 0 || dyR >= convInfo.outHeight ||
+ Math.floor(dyR) !== dyR) {
+ continue;
+ }
+ for (let wC = 0; wC < effectiveFilterWidth; wC += dilationWidth) {
+ const dyC = (dyCCorner + wC) / strideWidth;
+ if (dyC < 0 || dyC >= convInfo.outWidth ||
+ Math.floor(dyC) !== dyC) {
+ continue;
+ }
+ const maxPos = effectiveFilterHeight * effectiveFilterWidth -
+ 1 - maxPosBuf.get(b, dyR, dyC, d);
+ const curPos = wR * effectiveFilterWidth + wC;
+ const mask = maxPos === curPos ? 1 : 0;
+ if (mask === 0) {
+ continue;
+ }
+ const pixel = dyBuf.get(b, dyR, dyC, d);
+ dotProd += pixel * mask;
+ }
+ }
+ dx.set(dotProd, b, dxR, dxC, d);
+ }
+ }
+ }
+ }
+ return dx.toTensor();
+ }
+ avgPoolBackprop(dy, x, convInfo) {
+ Object(cpu_util["a" /* assertNotComplex */])([dy, x], 'avgPoolBackprop');
+ const strideHeight = convInfo.strideHeight;
+ const strideWidth = convInfo.strideWidth;
+ const filterHeight = convInfo.filterHeight;
+ const filterWidth = convInfo.filterWidth;
+ const dilationHeight = convInfo.dilationHeight;
+ const dilationWidth = convInfo.dilationWidth;
+ const effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ const effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;
+ const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;
+ const dx = dist["buffer"](x.shape, 'float32');
+ const avgMultiplier = 1 / (filterHeight * filterWidth);
+ const dyBuf = this.bufferSync(dy);
+ for (let b = 0; b < convInfo.batchSize; ++b) {
+ for (let d = 0; d < convInfo.inChannels; ++d) {
+ for (let dxR = 0; dxR < convInfo.inHeight; ++dxR) {
+ for (let dxC = 0; dxC < convInfo.inWidth; ++dxC) {
+ // Shader code begins.
+ const dyRCorner = dxR - padTop;
+ const dyCCorner = dxC - padLeft;
+ let dotProd = 0;
+ for (let wR = 0; wR < effectiveFilterHeight; wR += dilationHeight) {
+ const dyR = (dyRCorner + wR) / strideHeight;
+ if (dyR < 0 || dyR >= convInfo.outHeight ||
+ Math.floor(dyR) !== dyR) {
+ continue;
+ }
+ for (let wC = 0; wC < effectiveFilterWidth; wC += dilationWidth) {
+ const dyC = (dyCCorner + wC) / strideWidth;
+ if (dyC < 0 || dyC >= convInfo.outWidth ||
+ Math.floor(dyC) !== dyC) {
+ continue;
+ }
+ const pixel = dyBuf.get(b, dyR, dyC, d);
+ dotProd += pixel;
+ }
+ }
+ dx.set(dotProd * avgMultiplier, b, dxR, dxC, d);
+ }
+ }
+ }
+ }
+ return dx.toTensor();
+ }
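+ // pool3d computes max and average 3D pooling in a single pass: minMaxValue
+ // tracks the running max while avgValue/count accumulate the mean, and
+ // window bounds are pre-clamped (xDepthMin..xDepthMax etc.) so padding
+ // never contributes to either reduction.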
+ pool3d(x, convInfo, poolType) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'pool3d');
+ const strideDepth = convInfo.strideDepth;
+ const strideHeight = convInfo.strideHeight;
+ const strideWidth = convInfo.strideWidth;
+ const dilationDepth = convInfo.dilationDepth;
+ const dilationHeight = convInfo.dilationHeight;
+ const dilationWidth = convInfo.dilationWidth;
+ const effectiveFilterDepth = convInfo.effectiveFilterDepth;
+ const effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ const effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ const padFront = convInfo.padInfo.front;
+ const padTop = convInfo.padInfo.top;
+ const padLeft = convInfo.padInfo.left;
+ const initialValue = (poolType === 'max' ? Number.NEGATIVE_INFINITY :
+ Number.POSITIVE_INFINITY);
+ const xValues = this.readSync(x.dataId);
+ const output = dist["buffer"](convInfo.outShape, x.dtype);
+ const outputVals = output.values;
+ const outputBatchStrides = convInfo.outShape[1] * convInfo.outShape[2] *
+ convInfo.outShape[3] * convInfo.outShape[4];
+ const outputDepthStrides = convInfo.outShape[2] * convInfo.outShape[3] * convInfo.outShape[4];
+ const outputRowStrides = convInfo.outShape[3] * convInfo.outShape[4];
+ const outputColStrides = convInfo.outShape[4];
+ for (let batch = 0; batch < convInfo.batchSize; ++batch) {
+ const outputBatchOffset = batch * outputBatchStrides;
+ const inputBatchOffset = batch * x.strides[0];
+ for (let channel = 0; channel < convInfo.inChannels; ++channel) {
+ for (let yDepth = 0; yDepth < convInfo.outDepth; ++yDepth) {
+ const xDepthCorner = yDepth * strideDepth - padFront;
+ let xDepthMin = xDepthCorner;
+ while (xDepthMin < 0) {
+ xDepthMin += dilationDepth;
+ }
+ const xDepthMax = Math.min(convInfo.inDepth, effectiveFilterDepth + xDepthCorner);
+ const outputDepthOffset = outputBatchOffset + yDepth * outputDepthStrides;
+ for (let yRow = 0; yRow < convInfo.outHeight; ++yRow) {
+ const xRowCorner = yRow * strideHeight - padTop;
+ let xRowMin = xRowCorner;
+ while (xRowMin < 0) {
+ xRowMin += dilationHeight;
+ }
+ const xRowMax = Math.min(convInfo.inHeight, effectiveFilterHeight + xRowCorner);
+ const outputRowOffset = outputDepthOffset + yRow * outputRowStrides;
+ for (let yCol = 0; yCol < convInfo.outWidth; ++yCol) {
+ const xColCorner = yCol * strideWidth - padLeft;
+ let xColMin = xColCorner;
+ while (xColMin < 0) {
+ xColMin += dilationWidth;
+ }
+ const xColMax = Math.min(convInfo.inWidth, effectiveFilterWidth + xColCorner);
+ // Shader code begins
+ const outputColOffset = outputRowOffset + yCol * outputColStrides;
+ let minMaxValue = initialValue;
+ let avgValue = 0;
+ let count = 0;
+ for (let xDepth = xDepthMin; xDepth < xDepthMax; xDepth += dilationDepth) {
+ const xDepthOffset = inputBatchOffset + xDepth * x.strides[1];
+ for (let xRow = xRowMin; xRow < xRowMax; xRow += dilationHeight) {
+ const xRowOffset = xDepthOffset + xRow * x.strides[2];
+ for (let xCol = xColMin; xCol < xColMax; xCol += dilationWidth) {
+ const xColOffset = xRowOffset + xCol * x.strides[3];
+ const pixel = xValues[xColOffset + channel];
+ if ((poolType === 'max' && pixel > minMaxValue)) {
+ minMaxValue = pixel;
+ }
+ else if (poolType === 'avg') {
+ avgValue += pixel;
+ count++;
+ }
+ if (isNaN(minMaxValue)) {
+ break;
+ }
+ }
+ if (isNaN(minMaxValue)) {
+ break;
+ }
+ }
+ if (isNaN(minMaxValue)) {
+ break;
+ }
+ }
+ const outputOffset = outputColOffset + channel;
+ outputVals[outputOffset] =
+ poolType === 'avg' ? avgValue / count : minMaxValue;
+ }
+ }
+ }
+ }
+ }
+ return output.toTensor();
+ }
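+ // Illustration (not upstream code): the effective filter size used above
+ // accounts for dilation, effectiveSize = (filterSize - 1) * dilation + 1.
+ // For example a depth-3 filter with dilationDepth = 2 spans an effective
+ // depth of 5, which is why the loops step by the dilation while bounding
+ // the window at the effectiveFilter* extents.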
+ avgPool3d(x, convInfo) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'avgPool3d');
+ return this.pool3d(x, convInfo, 'avg').toFloat();
+ }
+ avgPool3dBackprop(dy, x, convInfo) {
+ Object(cpu_util["a" /* assertNotComplex */])([dy, x], 'avgPool3dBackprop');
+ const strideDepth = convInfo.strideDepth;
+ const strideHeight = convInfo.strideHeight;
+ const strideWidth = convInfo.strideWidth;
+ const filterDepth = convInfo.filterDepth;
+ const filterHeight = convInfo.filterHeight;
+ const filterWidth = convInfo.filterWidth;
+ const dilationDepth = convInfo.dilationDepth;
+ const dilationHeight = convInfo.dilationHeight;
+ const dilationWidth = convInfo.dilationWidth;
+ const effectiveFilterDepth = convInfo.effectiveFilterDepth;
+ const effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ const effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ const padFront = effectiveFilterDepth - 1 - convInfo.padInfo.front;
+ const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;
+ const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;
+ const dx = dist["buffer"](x.shape, 'float32');
+ const avgMultiplier = 1 / (filterDepth * filterHeight * filterWidth);
+ const dyBuf = this.bufferSync(dy);
+ for (let batch = 0; batch < convInfo.batchSize; ++batch) {
+ for (let channel = 0; channel < convInfo.inChannels; ++channel) {
+ for (let dxDepth = 0; dxDepth < convInfo.inDepth; ++dxDepth) {
+ for (let dxRow = 0; dxRow < convInfo.inHeight; ++dxRow) {
+ for (let dxCol = 0; dxCol < convInfo.inWidth; ++dxCol) {
+ // Shader code begins.
+ const dyDepthCorner = dxDepth - padFront;
+ const dyRowCorner = dxRow - padTop;
+ const dyColCorner = dxCol - padLeft;
+ let dotProd = 0;
+ for (let wDepth = 0; wDepth < effectiveFilterDepth; wDepth += dilationDepth) {
+ const dyDepth = (dyDepthCorner + wDepth) / strideDepth;
+ if (dyDepth < 0 || dyDepth >= convInfo.outDepth ||
+ Math.floor(dyDepth) !== dyDepth) {
+ continue;
+ }
+ for (let wRow = 0; wRow < effectiveFilterHeight; wRow += dilationHeight) {
+ const dyRow = (dyRowCorner + wRow) / strideHeight;
+ if (dyRow < 0 || dyRow >= convInfo.outHeight ||
+ Math.floor(dyRow) !== dyRow) {
+ continue;
+ }
+ for (let wCol = 0; wCol < effectiveFilterWidth; wCol += dilationWidth) {
+ const dyCol = (dyColCorner + wCol) / strideWidth;
+ if (dyCol < 0 || dyCol >= convInfo.outWidth ||
+ Math.floor(dyCol) !== dyCol) {
+ continue;
+ }
+ const pixel = dyBuf.get(batch, dyDepth, dyRow, dyCol, channel);
+ dotProd += pixel;
+ }
+ }
+ }
+ dx.set(dotProd * avgMultiplier, batch, dxDepth, dxRow, dxCol, channel);
+ }
+ }
+ }
+ }
+ }
+ return dx.toTensor();
+ }
+ maxPool3d(x, convInfo) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'maxPool3d');
+ return this.pool3d(x, convInfo, 'max').toFloat();
+ }
+ maxPool3dPositions(x, convInfo) {
+ const maxPositions = dist["buffer"](convInfo.outShape, 'int32');
+ const strideDepth = convInfo.strideDepth;
+ const strideHeight = convInfo.strideHeight;
+ const strideWidth = convInfo.strideWidth;
+ const dilationDepth = convInfo.dilationDepth;
+ const dilationHeight = convInfo.dilationHeight;
+ const dilationWidth = convInfo.dilationWidth;
+ const effectiveFilterDepth = convInfo.effectiveFilterDepth;
+ const effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ const effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ const padFront = convInfo.padInfo.front;
+ const padTop = convInfo.padInfo.top;
+ const padLeft = convInfo.padInfo.left;
+ const xBuf = this.bufferSync(x);
+ for (let batch = 0; batch < convInfo.batchSize; ++batch) {
+ for (let channel = 0; channel < convInfo.inChannels; ++channel) {
+ for (let yDepth = 0; yDepth < convInfo.outDepth; ++yDepth) {
+ const xDepthCorner = yDepth * strideDepth - padFront;
+ let xDepthMin = xDepthCorner;
+ while (xDepthMin < 0) {
+ xDepthMin += dilationDepth;
+ }
+ const xDepthMax = Math.min(convInfo.inDepth, effectiveFilterDepth + xDepthCorner);
+ for (let yRow = 0; yRow < convInfo.outHeight; ++yRow) {
+ const xRowCorner = yRow * strideHeight - padTop;
+ let xRowMin = xRowCorner;
+ while (xRowMin < 0) {
+ xRowMin += dilationHeight;
+ }
+ const xRowMax = Math.min(convInfo.inHeight, effectiveFilterHeight + xRowCorner);
+ for (let yCol = 0; yCol < convInfo.outWidth; ++yCol) {
+ const xColCorner = yCol * strideWidth - padLeft;
+ let xColMin = xColCorner;
+ while (xColMin < 0) {
+ xColMin += dilationWidth;
+ }
+ const xColMax = Math.min(convInfo.inWidth, effectiveFilterWidth + xColCorner);
+ // Shader code begins
+ let maxValue = Number.NEGATIVE_INFINITY;
+ let maxPosition = -1;
+ for (let xDepth = xDepthMin; xDepth < xDepthMax; xDepth += dilationDepth) {
+ const wDepth = xDepth - xDepthCorner;
+ for (let xRow = xRowMin; xRow < xRowMax; xRow += dilationHeight) {
+ const wRow = xRow - xRowCorner;
+ for (let xCol = xColMin; xCol < xColMax; xCol += dilationWidth) {
+ const wCol = xCol - xColCorner;
+ const pixel = xBuf.get(batch, xDepth, xRow, xCol, channel);
+ if (pixel >= maxValue) {
+ maxValue = pixel;
+ // Row-major position within the effective filter volume; this must match
+ // the curPos encoding in maxPool3dBackprop (wRow scales by the width).
+ maxPosition = wDepth * effectiveFilterHeight *
+ effectiveFilterWidth +
+ wRow * effectiveFilterWidth + wCol;
+ }
+ }
+ }
+ }
+ maxPositions.set(maxPosition, batch, yDepth, yRow, yCol, channel);
+ }
+ }
+ }
+ }
+ }
+ return maxPositions.toTensor();
+ }
+ maxPool3dBackprop(dy, x, y, convInfo) {
+ Object(cpu_util["a" /* assertNotComplex */])([x, y], 'maxPool3dBackprop');
+ const maxPositions = this.maxPool3dPositions(x, convInfo);
+ const strideDepth = convInfo.strideDepth;
+ const strideHeight = convInfo.strideHeight;
+ const strideWidth = convInfo.strideWidth;
+ const dilationDepth = convInfo.dilationDepth;
+ const dilationHeight = convInfo.dilationHeight;
+ const dilationWidth = convInfo.dilationWidth;
+ const effectiveFilterDepth = convInfo.effectiveFilterDepth;
+ const effectiveFilterHeight = convInfo.effectiveFilterHeight;
+ const effectiveFilterWidth = convInfo.effectiveFilterWidth;
+ const padFront = effectiveFilterDepth - 1 - convInfo.padInfo.front;
+ const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;
+ const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;
+ const dx = dist["buffer"](x.shape, 'float32');
+ const maxPosBuf = this.bufferSync(maxPositions);
+ const dyBuf = this.bufferSync(dy);
+ for (let batch = 0; batch < convInfo.batchSize; ++batch) {
+ for (let channel = 0; channel < convInfo.inChannels; ++channel) {
+ for (let dxDepth = 0; dxDepth < convInfo.inDepth; ++dxDepth) {
+ for (let dxRow = 0; dxRow < convInfo.inHeight; ++dxRow) {
+ for (let dxCol = 0; dxCol < convInfo.inWidth; ++dxCol) {
+ // Shader code begins
+ const dyDepthCorner = dxDepth - padFront;
+ const dyRowCorner = dxRow - padTop;
+ const dyColCorner = dxCol - padLeft;
+ let dotProd = 0;
+ for (let wDepth = 0; wDepth < effectiveFilterDepth; wDepth += dilationDepth) {
+ const dyDepth = (dyDepthCorner + wDepth) / strideDepth;
+ if (dyDepth < 0 || dyDepth >= convInfo.outDepth ||
+ Math.floor(dyDepth) !== dyDepth) {
+ continue;
+ }
+ for (let wRow = 0; wRow < effectiveFilterHeight; wRow += dilationHeight) {
+ const dyRow = (dyRowCorner + wRow) / strideHeight;
+ if (dyRow < 0 || dyRow >= convInfo.outHeight ||
+ Math.floor(dyRow) !== dyRow) {
+ continue;
+ }
+ for (let wCol = 0; wCol < effectiveFilterWidth; wCol += dilationWidth) {
+ const dyCol = (dyColCorner + wCol) / strideWidth;
+ if (dyCol < 0 || dyCol >= convInfo.outWidth ||
+ Math.floor(dyCol) !== dyCol) {
+ continue;
+ }
+ const maxPos = effectiveFilterDepth *
+ effectiveFilterHeight * effectiveFilterWidth -
+ 1 -
+ maxPosBuf.get(batch, dyDepth, dyRow, dyCol, channel);
+ const curPos = wDepth * effectiveFilterHeight * effectiveFilterWidth +
+ wRow * effectiveFilterWidth + wCol;
+ const mask = maxPos === curPos ? 1 : 0;
+ if (mask === 0) {
+ continue;
+ }
+ const pixel = dyBuf.get(batch, dyDepth, dyRow, dyCol, channel);
+ dotProd += pixel * mask;
+ }
+ }
+ }
+ dx.set(dotProd, batch, dxDepth, dxRow, dxCol, channel);
+ }
+ }
+ }
+ }
+ }
+ return dx.toTensor();
+ }
+ cast(x, dtype) {
+ return dist["backend_util"].castTensor(x, dtype, this);
+ }
+ reshape(x, shape) {
+ return dist["backend_util"].reshapeTensor(x, shape);
+ }
+ avgPool(x, convInfo) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'avgPool');
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'maxPool');
+ const xValues = this.readSync(x.dataId);
+ return Object(pool_utils["b" /* pool */])(xValues, x.shape, x.dtype, x.strides, convInfo, 'avg')
+ .toTensor()
+ .toFloat();
+ }
+ resizeBilinear(x, newHeight, newWidth, alignCorners) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'resizeBilinear');
+ const [batch, oldHeight, oldWidth, numChannels] = x.shape;
+ const xValues = this.readSync(x.dataId);
+ const result = new Float32Array(dist["util"].sizeFromShape([batch, newHeight, newWidth, numChannels]));
+ const effectiveInputSize = [
+ (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight,
+ (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth
+ ];
+ const effectiveOutputSize = [
+ (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight,
+ (alignCorners && newWidth > 1) ? newWidth - 1 : newWidth
+ ];
+ let outputIdx = 0;
+ const effectiveRowSizeRatio = effectiveInputSize[0] / effectiveOutputSize[0];
+ const effectiveColSizeRatio = effectiveInputSize[1] / effectiveOutputSize[1];
+ for (let b = 0; b < batch; b++) {
+ for (let r = 0; r < newHeight; r++) {
+ const sourceFracRow = effectiveRowSizeRatio * r;
+ const sourceRowFloor = Math.floor(sourceFracRow);
+ const rowFrac = sourceFracRow - sourceRowFloor;
+ const sourceRowCeil = Math.min(oldHeight - 1, Math.ceil(sourceFracRow));
+ const topRowOffset = b * x.strides[0] + sourceRowFloor * x.strides[1];
+ const botRowOffset = b * x.strides[0] + sourceRowCeil * x.strides[1];
+ for (let c = 0; c < newWidth; c++) {
+ const sourceFracCol = effectiveColSizeRatio * c;
+ const sourceColFloor = Math.floor(sourceFracCol);
+ const colFrac = sourceFracCol - sourceColFloor;
+ const sourceColCeil = Math.min(oldWidth - 1, Math.ceil(sourceFracCol));
+ const topLeftOffset = topRowOffset + sourceColFloor * x.strides[2];
+ const botLeftOffset = botRowOffset + sourceColFloor * x.strides[2];
+ const topRightOffset = topRowOffset + sourceColCeil * x.strides[2];
+ const botRightOffset = botRowOffset + sourceColCeil * x.strides[2];
+ for (let d = 0; d < numChannels; d++) {
+ // Begin shader.
+ // Compute the fractional index of the source.
+ const topLeft = xValues[topLeftOffset + d];
+ const bottomLeft = xValues[botLeftOffset + d];
+ const topRight = xValues[topRightOffset + d];
+ const bottomRight = xValues[botRightOffset + d];
+ const top = topLeft + (topRight - topLeft) * colFrac;
+ const bottom = bottomLeft + (bottomRight - bottomLeft) * colFrac;
+ const newValue = top + (bottom - top) * rowFrac;
+ result[outputIdx++] = newValue;
+ }
+ }
+ }
+ }
+ return dist["tensor"](result, [batch, newHeight, newWidth, numChannels]);
+ }
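+ // Worked example (illustration, not upstream code): for a fractional source
+ // row of 1.25, sourceRowFloor = 1, rowFrac = 0.25, sourceRowCeil = 2; with
+ // colFrac = 0.5 the output is
+ // top = topLeft + (topRight - topLeft) * 0.5,
+ // bottom = bottomLeft + (bottomRight - bottomLeft) * 0.5,
+ // value = top + (bottom - top) * 0.25,
+ // i.e. a straight bilinear interpolation between the four neighbors.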
+ resizeBilinearBackprop(dy, x, alignCorners) {
+ Object(cpu_util["a" /* assertNotComplex */])([dy, x], 'resizeBilinearBackprop');
+ const [batch, xHeight, xWidth, depth] = x.shape;
+ const [, yHeight, yWidth] = dy.shape;
+ const output = new Float32Array(batch * xHeight * xWidth * depth);
+ // In the backwards pass, we want to find, for each pixel of the input
+ // image, the output pixels it generated in the forward pass, and add the
+ // corresponding coefficient from dy to the gradient (with some
+ // interpolation).
+ const effectiveXSize = [
+ (alignCorners && yHeight > 1) ? xHeight - 1 : xHeight,
+ (alignCorners && yWidth > 1) ? xWidth - 1 : xWidth
+ ];
+ const effectiveYSize = [
+ (alignCorners && yHeight > 1) ? yHeight - 1 : yHeight,
+ (alignCorners && yWidth > 1) ? yWidth - 1 : yWidth
+ ];
+ const heightScale = effectiveXSize[0] / effectiveYSize[0];
+ const widthScale = effectiveXSize[1] / effectiveYSize[1];
+ // Reference implementation
+ // tslint:disable-next-line:max-line-length
+ // https://github.com/tensorflow/tensorflow/blob/3039375c86a5bbc9610c7725dcaa95d635f87ba2/tensorflow/core/kernels/resize_bilinear_op.cc#L275
+ const dyValues = this.readSync(dy.dataId);
+ let offset = 0;
+ for (let b = 0; b < batch; b++) {
+ const bOffset = b * x.strides[0];
+ for (let r = 0; r < yHeight; r++) {
+ const dxR = r * heightScale;
+ const topDxRIndex = Math.floor(dxR);
+ const bottomDxRIndex = Math.min(Math.ceil(dxR), xHeight - 1);
+ const topDxROffset = bOffset + topDxRIndex * x.strides[1];
+ const bottomDxROffset = bOffset + bottomDxRIndex * x.strides[1];
+ const dxRLerp = dxR - topDxRIndex;
+ const inverseDxRLerp = 1.0 - dxRLerp;
+ for (let c = 0; c < yWidth; c++) {
+ const dxC = c * widthScale;
+ const leftDxCIndex = Math.floor(dxC);
+ const rightDxCIndex = Math.min(Math.ceil(dxC), xWidth - 1);
+ const dxCLerp = dxC - leftDxCIndex;
+ const inverseDxCLerp = 1.0 - dxCLerp;
+ const topLeftRCOffset = topDxROffset + leftDxCIndex * x.strides[2];
+ const topRightRCOffset = topDxROffset + rightDxCIndex * x.strides[2];
+ const bottomLeftRCOffset = bottomDxROffset + leftDxCIndex * x.strides[2];
+ const bottomRightRCOffset = bottomDxROffset + rightDxCIndex * x.strides[2];
+ const inverseDxRLerpTimesInverseDxCLerp = inverseDxRLerp * inverseDxCLerp;
+ const inverseDxRLerpTimesDxCLerp = inverseDxRLerp * dxCLerp;
+ const dxRLerpTimesInverseDxCLerp = dxRLerp * inverseDxCLerp;
+ const dxRLerpTimesDxCLerp = dxRLerp * dxCLerp;
+ for (let d = 0; d < depth; d++) {
+ const dyVal = dyValues[offset++];
+ output[topLeftRCOffset + d] +=
+ dyVal * inverseDxRLerpTimesInverseDxCLerp;
+ output[topRightRCOffset + d] += dyVal * inverseDxRLerpTimesDxCLerp;
+ output[bottomLeftRCOffset + d] +=
+ dyVal * dxRLerpTimesInverseDxCLerp;
+ output[bottomRightRCOffset + d] += dyVal * dxRLerpTimesDxCLerp;
+ }
+ }
+ }
+ }
+ return dist["tensor4d"](output, [batch, xWidth, xHeight, depth], x.dtype);
+ }
+ resizeNearestNeighbor(x, newHeight, newWidth, alignCorners) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'resizeNearestNeighbor');
+ const [batch, oldHeight, oldWidth, numChannels] = x.shape;
+ const xValues = this.readSync(x.dataId);
+ const output = new Float32Array(batch * newHeight * newWidth * numChannels);
+ const effectiveInputSize = [
+ (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight,
+ (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth
+ ];
+ const effectiveOutputSize = [
+ (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight,
+ (alignCorners && newWidth > 1) ? newWidth - 1 : newWidth
+ ];
+ const effectiveRowSizeRatio = effectiveInputSize[0] / effectiveOutputSize[0];
+ const effectiveColSizeRatio = effectiveInputSize[1] / effectiveOutputSize[1];
+ let outputOffset = 0;
+ for (let b = 0; b < batch; b++) {
+ const batchOffset = b * x.strides[0];
+ for (let r = 0; r < newHeight; r++) {
+ const sourceFracRow = effectiveRowSizeRatio * r;
+ const sourceNearestRow = Math.min(oldHeight - 1, alignCorners ? Math.round(sourceFracRow) :
+ Math.floor(sourceFracRow));
+ const rowOffset = batchOffset + sourceNearestRow * x.strides[1];
+ for (let c = 0; c < newWidth; c++) {
+ const sourceFracCol = effectiveColSizeRatio * c;
+ const sourceNearestCol = Math.min(oldWidth - 1, alignCorners ? Math.round(sourceFracCol) :
+ Math.floor(sourceFracCol));
+ const colOffset = rowOffset + sourceNearestCol * x.strides[2];
+ for (let d = 0; d < numChannels; d++) {
+ // Begin shader.
+ // Compute the fractional index of the source.
+ const newVal = xValues[colOffset + d];
+ output[outputOffset++] = newVal;
+ }
+ }
+ }
+ }
+ return dist["tensor"](output, [batch, newHeight, newWidth, numChannels], x.dtype);
+ }
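+ // Worked example (illustration, not upstream code): resizing height 4 -> 8
+ // with alignCorners = true uses the ratio (4 - 1) / (8 - 1) = 3/7 and
+ // Math.round, so output row 7 maps to round(7 * 3/7) = 3, the last input
+ // row; without alignCorners the ratio is 4/8 = 0.5 and Math.floor is used,
+ // sampling the source at slightly different positions.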
+ resizeNearestNeighborBackprop(dy, x, alignCorners) {
+ Object(cpu_util["a" /* assertNotComplex */])([dy, x], 'resizeNearestNeighborBackprop');
+ const [batch, xHeight, xWidth, depth] = x.shape;
+ const [, yHeight, yWidth] = dy.shape;
+ const output = new Float32Array(batch * xHeight * xWidth * depth);
+ const dyValues = this.readSync(dy.dataId);
+ // In the backwards pass, we want to find, for each pixel of the input
+ // image, the output pixels it generated in the forward pass.
+ const effectiveXSize = [
+ (alignCorners && yHeight > 1) ? xHeight - 1 : xHeight,
+ (alignCorners && yWidth > 1) ? xWidth - 1 : xWidth
+ ];
+ const effectiveYSize = [
+ (alignCorners && yHeight > 1) ? yHeight - 1 : yHeight,
+ (alignCorners && yWidth > 1) ? yWidth - 1 : yWidth
+ ];
+ const heightScale = effectiveXSize[0] / effectiveYSize[0];
+ const widthScale = effectiveXSize[1] / effectiveYSize[1];
+ const invHeightScale = 1 / heightScale;
+ const invWidthScale = 1 / widthScale;
+ // This defines the size of the window of values around a particular
+ // index in dy that we want to search for contributions to dx.
+ const winHeight = (Math.ceil(invHeightScale) * 2) + 2;
+ const winWidth = (Math.ceil(invWidthScale) * 2) + 2;
+ // Loop over the output space.
+ for (let b = 0; b < batch; b++) {
+ const batchOffset = b * x.strides[0];
+ for (let r = 0; r < xHeight; r++) {
+ const rowOffset = batchOffset + r * x.strides[1];
+ // Compute bounds for where in dy we will look
+ const startRLerp = Math.floor(r * invHeightScale);
+ const startDyR = Math.floor(startRLerp - (winHeight / 2));
+ for (let c = 0; c < xWidth; c++) {
+ const colOffset = rowOffset + c * x.strides[2];
+ // Compute bounds for where in dy we will look
+ const startCLerp = Math.floor(c * invWidthScale);
+ const startDyC = Math.floor(startCLerp - (winWidth / 2));
+ for (let d = 0; d < depth; d++) {
+ let accum = 0;
+ // loop over dy
+ for (let dyRIndex = 0; dyRIndex < winHeight; dyRIndex++) {
+ const dyR = dyRIndex + startDyR;
+ // Guard against the window exceeding the bounds of dy
+ if (dyR < 0 || dyR >= yHeight) {
+ continue;
+ }
+ const dyROffset = batchOffset + dyR * dy.strides[1];
+ const sourceFracRow = dyR * heightScale;
+ const sourceNearestRow = Math.min(xHeight - 1, alignCorners ? Math.round(sourceFracRow) :
+ Math.floor(sourceFracRow));
+ if (r !== sourceNearestRow) {
+ continue;
+ }
+ for (let dyCIndex = 0; dyCIndex < winWidth; dyCIndex++) {
+ const dyC = dyCIndex + startDyC;
+ // Guard against the window exceeding the bounds of dy
+ if (dyC < 0 || dyC >= yWidth) {
+ continue;
+ }
+ const dyCOffset = dyROffset + dyC * dy.strides[2];
+ const sourceFracCol = dyC * widthScale;
+ const sourceNearestCol = Math.min(xWidth - 1, alignCorners ? Math.round(sourceFracCol) :
+ Math.floor(sourceFracCol));
+ if (c === sourceNearestCol) {
+ accum += dyValues[dyCOffset + d];
+ }
+ }
+ }
+ output[colOffset + d] = accum;
+ }
+ }
+ }
+ }
+ return dist["tensor4d"](output, x.shape, x.dtype);
+ }
+ batchNorm(x, mean, variance, offset, scale, varianceEpsilon) {
+ Object(cpu_util["a" /* assertNotComplex */])([x, mean, variance, scale, offset], 'batchNorm');
+ const xVals = this.readSync(x.dataId);
+ const mVals = this.readSync(mean.dataId);
+ const varVals = this.readSync(variance.dataId);
+ const sVals = scale ? this.readSync(scale.dataId) :
+ new Float32Array([1]);
+ const offVals = offset ? this.readSync(offset.dataId) :
+ new Float32Array([0]);
+ const outVals = new Float32Array(xVals.length);
+ const offValsLength = offVals.length;
+ const sValsLength = sVals.length;
+ const varValsLength = varVals.length;
+ const mValsLength = mVals.length;
+ let offi = 0;
+ let mi = 0;
+ let si = 0;
+ let vi = 0;
+ for (let i = 0; i < xVals.length; ++i) {
+ outVals[i] = offVals[offi++] +
+ (xVals[i] - mVals[mi++]) * sVals[si++] /
+ Math.sqrt(varVals[vi++] + varianceEpsilon);
+ if (offi >= offValsLength) {
+ offi = 0;
+ }
+ if (mi >= mValsLength) {
+ mi = 0;
+ }
+ if (si >= sValsLength) {
+ si = 0;
+ }
+ if (vi >= varValsLength) {
+ vi = 0;
+ }
+ }
+ return dist["tensor4d"](outVals, x.shape);
+ }
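+ // Reference formula (illustration, not upstream code): the loop above
+ // computes, elementwise with broadcasting via the wrapped counters,
+ // out = offset + (x - mean) * scale / sqrt(variance + varianceEpsilon).
+ // E.g. x = 3, mean = 1, variance = 4, scale = 2, offset = 0.5, eps = 0:
+ // out = 0.5 + (3 - 1) * 2 / 2 = 2.5.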
+ localResponseNormalization4D(x, depthRadius, bias, alpha, beta) {
+ Object(cpu_util["a" /* assertNotComplex */])(x, 'localResponseNormalization4D');
+ const channels = x.shape[3];
+ const maxD = channels - 1;
+ const xValues = this.readSync(x.dataId);
+ const size = x.size;
+ const result = new Float32Array(size);
+ function sumAcrossChannels(offset) {
+ const currentChannel = offset % channels;
+ let beginSumOffset = offset - currentChannel + Math.max(0, currentChannel - depthRadius);
+ const endSumOffset = offset - currentChannel +
+ Math.min(currentChannel + depthRadius, maxD);
+ let sum = 0.0;
+ for (; beginSumOffset <= endSumOffset; beginSumOffset++) {
+ const z = xValues[beginSumOffset];
+ sum += z * z;
+ }
+ return sum;
+ }
+ for (let offset = 0; offset < size; offset++) {
+ const sum = sumAcrossChannels(offset);
+ const val = xValues[offset] * Math.pow(bias + alpha * sum, -beta);
+ result[offset] = val;
+ }
+ return dist["tensor4d"](result, x.shape);
+ }
+ LRNGrad(dy, inputImage, outputImage, depthRadius, bias, alpha, beta) {
+ Object(cpu_util["a" /* assertNotComplex */])(dy, 'LRNGrad');
+ const channels = dy.shape[3];
+ const dyValues = this.readSync(dy.dataId);
+ const inputImageValues = this.readSync(inputImage.dataId);
+ const outputImageValues = this.readSync(outputImage.dataId);
+ const result = new Float32Array(dy.size);
+ const size = dy.size;
+ for (let offset = 0; offset < size; offset++) {
+ const currentChannel = offset % channels;
+ const depthBegin = (offset - currentChannel) + Math.max(0, currentChannel - depthRadius);
+ const depthEnd = (offset - currentChannel) +
+ Math.min(channels, currentChannel + depthRadius + 1);
+ let norm = 0;
+ for (let k = depthBegin; k < depthEnd; k++) {
+ norm += Math.pow(inputImageValues[k], 2);
+ }
+ norm = alpha * norm + bias;
+ for (let k = depthBegin; k < depthEnd; k++) {
+ let dyi = -2 * alpha * beta * inputImageValues[k] *
+ outputImageValues[offset] / norm;
+ if (offset === k) {
+ dyi += Math.pow(norm, -beta);
+ }
+ dyi *= dyValues[offset];
+ result[k] += dyi;
+ }
+ }
+ return dist["tensor4d"](result, dy.shape);
+ }
+ multinomial(logits, normalized, numSamples, seed) {
+ Object(cpu_util["a" /* assertNotComplex */])(logits, 'multinomial');
+ const probabilities = normalized ? logits : dist["softmax"](logits);
+ const batchSize = probabilities.shape[0];
+ const numEvents = probabilities.shape[1];
+ const res = dist["zeros"]([batchSize, numSamples], 'int32');
+ const resVals = this.readSync(res.dataId);
+ const probVals = this.readSync(probabilities.dataId);
+ for (let b = 0; b < batchSize; ++b) {
+ const offset = b * numEvents;
+ // The cdf won't include the last event. It will be implicit if no other
+ // event happened.
+ const cdf = new Float32Array(numEvents - 1);
+ cdf[0] = probVals[offset];
+ for (let event = 1; event < cdf.length; ++event) {
+ cdf[event] = cdf[event - 1] + probVals[offset + event];
+ }
+ const random = seedrandom["alea"](seed.toString());
+ const outOffset = b * numSamples;
+ for (let sampleId = 0; sampleId < numSamples; ++sampleId) {
+ const r = random();
+ // Assume last event happened by default.
+ resVals[outOffset + sampleId] = cdf.length;
+ for (let event = 0; event < cdf.length; event++) {
+ if (r < cdf[event]) {
+ resVals[outOffset + sampleId] = event;
+ break;
+ }
+ }
+ }
+ }
+ return res;
+ }
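+ // Worked example (illustration, not upstream code): for probabilities
+ // [0.2, 0.5, 0.3] the cdf stores only [0.2, 0.7]; a draw r = 0.75 is not
+ // below either entry, so the sample falls through to the implicit last
+ // event, index cdf.length = 2.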
+ oneHot(indices, depth, onValue, offValue) {
+ Object(cpu_util["a" /* assertNotComplex */])(indices, 'oneHot');
+ const res = new Float32Array(indices.size * depth);
+ res.fill(offValue);
+ const indicesVal = this.readSync(indices.dataId);
+ for (let event = 0; event < indices.size; ++event) {
+ if (indicesVal[event] >= 0 && indicesVal[event] < depth) {
+ res[event * depth + indicesVal[event]] = onValue;
+ }
+ }
+ return dist["tensor2d"](res, [indices.size, depth], 'int32');
+ }
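+ // Worked example (illustration, not upstream code): indices = [1, 3],
+ // depth = 4, onValue = 1, offValue = 0 produces the rows [0, 1, 0, 0] and
+ // [0, 0, 0, 1]; out-of-range indices simply leave their row filled with
+ // offValue.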
+ nonMaxSuppression(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) {
+ Object(cpu_util["a" /* assertNotComplex */])(boxes, 'nonMaxSuppression');
+ const boxesVals = this.readSync(boxes.dataId);
+ const scoresVals = this.readSync(scores.dataId);
+ return nonMaxSuppressionV3(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold);
+ }
+ fft(x) {
+ return this.fftBatch(x, false);
+ }
+ ifft(x) {
+ return this.fftBatch(x, true);
+ }
+ /**
+ * Calculates the FFT along the innermost dimension of a batched tensor.
+ */
+ fftBatch(x, inverse) {
+ const batch = x.shape[0];
+ const innerDim = x.shape[1];
+ // Collects real and imaginary values separately.
+ const realResult = dist["buffer"](x.shape, 'float32');
+ const imagResult = dist["buffer"](x.shape, 'float32');
+ const real = dist["real"](x).as2D(batch, innerDim);
+ const imag = dist["imag"](x).as2D(batch, innerDim);
+ for (let b = 0; b < batch; b++) {
+ // TODO: Support slice ops for complex type.
+ const r = real.slice([b, 0], [1, innerDim]);
+ const i = imag.slice([b, 0], [1, innerDim]);
+ const input = dist["complex"](r, i);
+ // Run FFT by batch element.
+ const res = this.readSync(this.fftImpl(input, inverse).dataId);
+ for (let d = 0; d < innerDim; d++) {
+ const c = dist["backend_util"].getComplexWithIndex(res, d);
+ realResult.values[b * innerDim + d] = c.real;
+ imagResult.values[b * innerDim + d] = c.imag;
+ }
+ }
+ const t = dist["complex"](realResult.toTensor(), imagResult.toTensor());
+ return t.as2D(batch, innerDim);
+ }
+ fftImpl(x, inverse) {
+ const x1D = x.as1D();
+ const n = x1D.size;
+ if (this.isExponentOf2(n)) {
+ let result = this.fftRadix2(x1D, n, inverse).as2D(x.shape[0], x.shape[1]);
+ if (inverse) {
+ result = dist["complex"](dist["real"](result).div(dist["scalar"](n)), dist["imag"](result).div(dist["scalar"](n)));
+ }
+ return result;
+ }
+ else {
+ const data = this.readSync(x.dataId);
+ const rawOutput = this.fourierTransformByMatmul(data, n, inverse);
+ const output = dist["backend_util"].splitRealAndImagArrays(rawOutput);
+ return dist["complex"](output.real, output.imag).as2D(x.shape[0], x.shape[1]);
+ }
+ }
+ isExponentOf2(size) {
+ return (size & size - 1) === 0;
+ }
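+ // Note (illustration, not upstream code): despite the name, this tests for
+ // a power of two. A power of two has a single set bit, so size & (size - 1)
+ // clears it to zero: 8 & 7 === 0 (power of two), while 6 & 5 === 4 (not).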
+ // FFT using the Cooley-Tukey algorithm, for power-of-two (radix-2) input sizes.
+ fftRadix2(input, size, inverse) {
+ if (size === 1) {
+ return input;
+ }
+ const data = this.readSync(input.dataId);
+ const half = size / 2;
+ const evenComplex = dist["backend_util"].complexWithEvenIndex(data);
+ let evenTensor = dist["complex"](evenComplex.real, evenComplex.imag).as1D();
+ const oddComplex = dist["backend_util"].complexWithOddIndex(data);
+ let oddTensor = dist["complex"](oddComplex.real, oddComplex.imag).as1D();
+ // Recursive calls on the even and odd halves of the original input.
+ evenTensor = this.fftRadix2(evenTensor, half, inverse);
+ oddTensor = this.fftRadix2(oddTensor, half, inverse);
+ const e = dist["backend_util"].exponents(size, inverse);
+ const exponent = dist["complex"](e.real, e.imag).mul(oddTensor);
+ const addPart = evenTensor.add(exponent);
+ const subPart = evenTensor.sub(exponent);
+ const realTensor = dist["real"](addPart).concat(dist["real"](subPart));
+ const imagTensor = dist["imag"](addPart).concat(dist["imag"](subPart));
+ return dist["complex"](realTensor, imagTensor).as1D();
+ }
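+ // Illustration (not upstream code): each recursion level applies the
+ // radix-2 butterfly E(k) +/- W(k) * O(k), where E and O are the FFTs of the
+ // even- and odd-indexed samples and W(k) are the twiddle factors from
+ // exponents(). For size 2 this reduces to X[0] = x[0] + x[1] and
+ // X[1] = x[0] - x[1].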
+ // Calculate the Fourier transform by multiplying by the sinusoid matrix.
+ fourierTransformByMatmul(data, size, inverse) {
+ const ret = new Float32Array(size * 2);
+ // TODO: Use matmul instead once it supports complex64 type.
+ for (let r = 0; r < size; r++) {
+ let real = 0.0;
+ let imag = 0.0;
+ for (let c = 0; c < size; c++) {
+ const e = dist["backend_util"].exponent(r * c, size, inverse);
+ const term = dist["backend_util"].getComplexWithIndex(data, c);
+ real += term.real * e.real - term.imag * e.imag;
+ imag += term.real * e.imag + term.imag * e.real;
+ }
+ if (inverse) {
+ real /= size;
+ imag /= size;
+ }
+ dist["backend_util"].assignToTypedArray(ret, real, imag, r);
+ }
+ return ret;
+ }
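+ // Reference formula (illustration, not upstream code): the loop evaluates
+ // the DFT directly, X[r] = sum_c x[c] * exp(-2*pi*i*r*c / size), with the
+ // sign flipped (and a 1/size factor) for the inverse -- an O(size^2)
+ // matrix-vector product used when size is not a power of two.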
+ depthToSpace(x, blockSize, dataFormat) {
+ dist["util"].assert(dataFormat === 'NHWC', () => `Only NHWC dataFormat supported on CPU for depthToSpace. Got ${dataFormat}`);
+ dist["util"].assert(blockSize > 1, () => `blockSize should be > 1 for depthToSpace, but was: ${blockSize}`);
+ const batchSize = x.shape[0];
+ const inputHeight = x.shape[1];
+ const inputWidth = x.shape[2];
+ const inputDepth = x.shape[3];
+ const outputHeight = inputHeight * blockSize;
+ const outputWidth = inputWidth * blockSize;
+ const outputDepth = inputDepth / (blockSize * blockSize);
+ const xValues = this.readSync(x.dataId);
+ const result = new Float32Array(batchSize * outputHeight * outputWidth * outputDepth);
+ let outputIdx = 0;
+ for (let b = 0; b < batchSize; ++b) {
+ for (let h = 0; h < outputHeight; ++h) {
+ const inH = Math.floor(h / blockSize);
+ const offsetH = (h % blockSize);
+ for (let w = 0; w < outputWidth; ++w) {
+ const inW = Math.floor(w / blockSize);
+ const offsetW = (w % blockSize);
+ const offsetD = (offsetH * blockSize + offsetW) * outputDepth;
+ for (let d = 0; d < outputDepth; ++d) {
+ const inD = d + offsetD;
+ const inputIdx = inD + inputDepth * (inW + inputWidth * (inH + inputHeight * b));
+ result[outputIdx++] = xValues[inputIdx];
+ }
+ }
+ }
+ }
+ return dist["tensor4d"](result, [batchSize, outputHeight, outputWidth, outputDepth]);
+ }
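+ // Worked example (illustration, not upstream code): with blockSize = 2, an
+ // input of shape [1, 1, 1, 4] with channels [c0, c1, c2, c3] becomes a
+ // [1, 2, 2, 1] output whose spatial positions (0,0), (0,1), (1,0), (1,1)
+ // hold c0, c1, c2, c3: each 2x2 spatial block is unpacked from blockSize^2
+ // consecutive channels.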
+ broadcastedBinaryOp(a, b, dtype, op) {
+ const newShape = dist["backend_util"].assertAndGetBroadcastShape(a.shape, b.shape);
+ const result = dist["buffer"](newShape, dtype);
+ const aVals = this.readSync(a.dataId);
+ const bVals = this.readSync(b.dataId);
+ const aBroadcastDims = dist["backend_util"].getBroadcastDims(a.shape, newShape);
+ const bBroadcastDims = dist["backend_util"].getBroadcastDims(b.shape, newShape);
+ const resVals = result.values;
+ if (aBroadcastDims.length + bBroadcastDims.length === 0) {
+ for (let i = 0; i < resVals.length; ++i) {
+ resVals[i] = op(aVals[i % aVals.length], bVals[i % bVals.length]);
+ }
+ }
+ else {
+ const aBuf = this.bufferSync(a);
+ const bBuf = this.bufferSync(b);
+ for (let i = 0; i < resVals.length; ++i) {
+ const loc = result.indexToLoc(i);
+ const aLoc = loc.slice(-a.rank);
+ aBroadcastDims.forEach(d => aLoc[d] = 0);
+ const aIndex = aBuf.locToIndex(aLoc);
+ const bLoc = loc.slice(-b.rank);
+ bBroadcastDims.forEach(d => bLoc[d] = 0);
+ const bIndex = bBuf.locToIndex(bLoc);
+ resVals[i] = op(aVals[aIndex], bVals[bIndex]);
+ }
+ }
+ return result.toTensor();
+ }
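+ // Worked example (illustration, not upstream code): for a of shape [2, 3]
+ // and b of shape [1, 3], bBroadcastDims = [0], so for every output location
+ // the b coordinate on axis 0 is pinned to 0 -- b's single row is reused
+ // against both rows of a.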
+ broadcastedBinaryComplexOp(a, b, op) {
+ const newShape = dist["backend_util"].assertAndGetBroadcastShape(a.shape, b.shape);
+ const realResult = dist["buffer"](newShape, 'float32');
+ const imagResult = dist["buffer"](newShape, 'float32');
+ const aVals = this.readSync(a.dataId);
+ const bVals = this.readSync(b.dataId);
+ const aBroadcastDims = dist["backend_util"].getBroadcastDims(a.shape, newShape);
+ const bBroadcastDims = dist["backend_util"].getBroadcastDims(b.shape, newShape);
+ const realVals = realResult.values;
+ const imagVals = imagResult.values;
+ if (aBroadcastDims.length + bBroadcastDims.length === 0) {
+ for (let i = 0; i < realVals.length; i++) {
+ const aIdx = i % aVals.length;
+ const bIdx = i % bVals.length;
+ const result = op(aVals[aIdx * 2], aVals[aIdx * 2 + 1], bVals[bIdx * 2], bVals[bIdx * 2 + 1]);
+ realVals[i] = result.real;
+ imagVals[i] = result.imag;
+ }
+ }
+ else {
+ const aRealBuf = this.bufferSync(this.data.get(a.dataId).complexTensors.real);
+ const bRealBuf = this.bufferSync(this.data.get(b.dataId).complexTensors.real);
+ for (let i = 0; i < realVals.length; i++) {
+ const loc = realResult.indexToLoc(i);
+ const aLoc = loc.slice(-a.rank);
+ aBroadcastDims.forEach(d => aLoc[d] = 0);
+ const aIndex = aRealBuf.locToIndex(aLoc);
+ const bLoc = loc.slice(-b.rank);
+ bBroadcastDims.forEach(d => bLoc[d] = 0);
+ const bIndex = bRealBuf.locToIndex(bLoc);
+ const opResult = op(aVals[aIndex * 2], aVals[aIndex * 2 + 1], bVals[bIndex * 2], bVals[bIndex * 2 + 1]);
+ realVals[i] = opResult.real;
+ imagVals[i] = opResult.imag;
+ }
+ }
+ return this.complex(realResult.toTensor(), imagResult.toTensor());
+ }
+ split(x, sizeSplits, axis) {
+ return split(x, sizeSplits, axis);
+ }
+ dispose() { }
+ floatPrecision() {
+ return 32;
+ }
+ /** Returns the machine epsilon for the backend's float precision. */
+ epsilon() {
+ return super.epsilon();
+ }
+ cropAndResize(images, boxes, boxIndex, cropSize, method, extrapolationValue) {
+ const [batch, imageHeight, imageWidth, numChannels] = images.shape;
+ const numBoxes = boxes.shape[0];
+ const [cropHeight, cropWidth] = cropSize;
+ const output = dist["buffer"]([numBoxes, cropHeight, cropWidth, numChannels], 'float32');
+ const boxVals = this.readSync(boxes.dataId);
+ const boxIndVals = this.readSync(boxIndex.dataId);
+ const imageVals = this.readSync(images.dataId);
+ const inStride = images.strides; // to calculate flat indexes into image
+ const outStride = output.strides; // to calculate flat indexes into output
+ // Reference implementation
+ // tslint:disable-next-line:max-line-length
+ // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/crop_and_resize_op.cc
+ for (let b = 0; b < numBoxes; b++) {
+ const startInd = b * 4;
+ const y1 = boxVals[startInd];
+ const x1 = boxVals[startInd + 1];
+ const y2 = boxVals[startInd + 2];
+ const x2 = boxVals[startInd + 3];
+ const bInd = boxIndVals[b];
+ if (bInd >= batch) {
+ continue;
+ }
+ const heightScale = (cropHeight > 1) ?
+ (y2 - y1) * (imageHeight - 1) / (cropHeight - 1) :
+ 0;
+ const widthScale = (cropWidth > 1) ? (x2 - x1) * (imageWidth - 1) / (cropWidth - 1) : 0;
+ for (let y = 0; y < cropHeight; y++) {
+ const yInd = (cropHeight > 1) ?
+ y1 * (imageHeight - 1) + y * (heightScale) :
+ 0.5 * (y1 + y2) * (imageHeight - 1);
+ if (yInd < 0 || yInd > imageHeight - 1) {
+ for (let x = 0; x < cropWidth; x++) {
+ for (let c = 0; c < numChannels; c++) {
+ const ind = c + x * outStride[2] + y * outStride[1] + b * outStride[0];
+ output.values[ind] = extrapolationValue;
+ }
+ }
+ continue;
+ }
+ if (method === 'bilinear') {
+ const topInd = Math.floor(yInd);
+ const bottomInd = Math.ceil(yInd);
+ const yLerp = yInd - topInd;
+ for (let x = 0; x < cropWidth; x++) {
+ const xInd = (cropWidth > 1) ?
+ x1 * (imageWidth - 1) + x * widthScale :
+ 0.5 * (x1 + x2) * (imageWidth - 1);
+ if (xInd < 0 || xInd > imageWidth - 1) {
+ for (let c = 0; c < numChannels; c++) {
+ const ind = c + x * outStride[2] + y * outStride[1] + b * outStride[0];
+ output.values[ind] = extrapolationValue;
+ }
+ continue;
+ }
+ const leftInd = Math.floor(xInd);
+ const rightInd = Math.ceil(xInd);
+ const xLerp = xInd - leftInd;
+ for (let c = 0; c < numChannels; c++) {
+ let ind = c + leftInd * inStride[2] + topInd * inStride[1] +
+ bInd * inStride[0];
+ const topLeft = imageVals[ind];
+ ind = c + rightInd * inStride[2] + topInd * inStride[1] +
+ bInd * inStride[0];
+ const topRight = imageVals[ind];
+ ind = c + leftInd * inStride[2] + bottomInd * inStride[1] +
+ bInd * inStride[0];
+ const bottomLeft = imageVals[ind];
+ ind = c + rightInd * inStride[2] + bottomInd * inStride[1] +
+ bInd * inStride[0];
+ const bottomRight = imageVals[ind];
+ const top = topLeft + (topRight - topLeft) * xLerp;
+ const bottom = bottomLeft + (bottomRight - bottomLeft) * xLerp;
+ ind = c + x * outStride[2] + y * outStride[1] + b * outStride[0];
+ output.values[ind] = top + ((bottom - top) * yLerp);
+ }
+ }
+ }
+ else { // method == "nearest"
+ for (let x = 0; x < cropWidth; ++x) {
+ const xInd = (cropWidth > 1) ?
+ x1 * (imageWidth - 1) + x * widthScale :
+ 0.5 * (x1 + x2) * (imageWidth - 1);
+ if (xInd < 0 || xInd > imageWidth - 1) {
+ for (let c = 0; c < numChannels; c++) {
+ const ind = c + x * outStride[2] + y * outStride[1] + b * outStride[0];
+ output.values[ind] = extrapolationValue;
+ }
+ continue;
+ }
+ const closestX = Math.round(xInd);
+ const closestY = Math.round(yInd);
+ for (let c = 0; c < numChannels; c++) {
+ const inInd = c + closestX * inStride[2] +
+ closestY * inStride[1] + bInd * inStride[0];
+ const outInd = c + x * outStride[2] + y * outStride[1] + b * outStride[0];
+ output.values[outInd] = imageVals[inInd];
+ }
+ }
+ }
+ }
+ }
+ return output.toTensor();
+ }
+ sparseToDense(sparseIndices, sparseValues, outputShape, defaultValue) {
+ const { sliceRank, numUpdates, sliceSize, strides, outputSize } = dist["backend_util"].calculateShapes(sparseValues, sparseIndices, outputShape);
+ const sumDupeIndices = false;
+ return this.scatter(sparseIndices, sparseValues, outputShape, outputSize, sliceSize, numUpdates, sliceRank, strides, defaultValue, sumDupeIndices);
+ }
+ gatherND(x, indices) {
+ const indicesShape = indices.shape;
+ const sliceRank = indicesShape[indicesShape.length - 1];
+ const [resultShape, numSlices, sliceSize, strides] = dist["backend_util"].prepareAndValidate(x, indices);
+ if (numSlices === 0) {
+ return dist["tensor"]([], resultShape, x.dtype);
+ }
+ const buffer = new dist["TensorBuffer"]([numSlices, sliceSize], x.dtype);
+ const indicesData = this.readSync(indices.dataId);
+ const xData = this.readSync(x.dataId);
+ for (let i = 0; i < numSlices; i++) {
+ const index = [];
+ let flattenIndex = 0;
+ for (let j = 0; j < sliceRank; j++) {
+ const dim = indicesData[i * sliceRank + j];
+ flattenIndex += dim * strides[j];
+ index.push(dim);
+ }
+ if (flattenIndex < 0 || flattenIndex >= x.size / sliceSize) {
+ throw new Error(`Invalid indices: ${index} does not index into ${x.shape}`);
+ }
+ for (let k = 0; k < sliceSize; k++) {
+ buffer.values[i * sliceSize + k] = xData[flattenIndex * sliceSize + k];
+ }
+ }
+ return buffer.toTensor().reshape(resultShape);
+ }
+ scatterND(indices, updates, shape) {
+ const { sliceRank, numUpdates, sliceSize, strides, outputSize } = dist["backend_util"].calculateShapes(updates, indices, shape);
+ const defaultValue = dist["scalar"](0);
+ const sumDupeIndices = true;
+ return this.scatter(indices, updates, shape, outputSize, sliceSize, numUpdates, sliceRank, strides, defaultValue, sumDupeIndices);
+ }
+ fill(shape, value, dtype) {
+ dtype = dtype || dist["util"].inferDtype(value);
+ const values = dist["util"].getArrayFromDType(dtype, dist["util"].sizeFromShape(shape));
+ values.fill(value);
+ return Object(dist["engine"])().makeTensor(values, shape, dtype, this);
+ }
+ onesLike(x) {
+ if (x.dtype === 'string') {
+ throw new Error('onesLike is not supported for string tensors');
+ }
+ else {
+ return this.fill(x.shape, 1, x.dtype);
+ }
+ }
+ zerosLike(x) {
+ const values = dist["util"].getArrayFromDType(x.dtype, dist["util"].sizeFromShape(x.shape));
+ return this.makeOutput(values, x.shape, x.dtype);
+ }
+ linspace(start, stop, num) {
+ return dist["backend_util"].linspaceImpl(start, stop, num);
+ }
+ scatter(indices, updates, shape, outputSize, sliceSize, numUpdates, sliceRank, strides, defaultValue, sumDupeIndices) {
+ const flattenShape = [outputSize / sliceSize, sliceSize];
+ const indicesData = this.readSync(indices.dataId);
+ const updatesData = this.readSync(updates.dataId);
+ if (outputSize === 0) {
+ return dist["tensor"]([], shape, updates.dtype);
+ }
+ const buffer = new dist["TensorBuffer"](flattenShape, updates.dtype);
+ buffer.values.fill(this.readSync(defaultValue.dataId)[0]);
+ for (let i = 0; i < numUpdates; i++) {
+ const index = [];
+ let flattenIndex = 0;
+ for (let j = 0; j < sliceRank; j++) {
+ const dim = indicesData[i * sliceRank + j];
+ index.push(dim);
+ flattenIndex += dim * strides[j];
+ }
+ if (flattenIndex < 0 || flattenIndex >= outputSize / sliceSize) {
+ throw new Error(`Invalid indices: ${index} does not index into ${shape}`);
+ }
+ for (let k = 0; k < sliceSize; k++) {
+ if (sumDupeIndices) {
+ buffer.values[flattenIndex * sliceSize + k] +=
+ updatesData[i * sliceSize + k];
+ }
+ else {
+ buffer.values[flattenIndex * sliceSize + k] = updates.rank === 0 ?
+ updatesData[0] :
+ updatesData[i * sliceSize + k];
+ }
+ }
+ }
+ return buffer.toTensor().reshape(shape);
+ }
+}
+//# sourceMappingURL=backend_cpu.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-backend-cpu/dist/version.js
+/** @license See the LICENSE file. */
+// This code is auto-generated, do not modify this file!
+const version = '2.0.1';
+
+//# sourceMappingURL=version.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-backend-cpu/dist/base.js
+/**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+/*
+ * base.ts contains all the exports from tfjs-backend-cpu
+ * that do not trigger side effects.
+ */
+
+
+
+
+//# sourceMappingURL=base.js.map
+
+/***/ }),
+/* 32 */
+/***/ (function(module, exports, __webpack_require__) {
+
+/* WEBPACK VAR INJECTION */(function(global) {var require;var require;/*!
+ localForage -- Offline Storage, Improved
+ Version 1.7.3
+ https://localforage.github.io/localForage
+ (c) 2013-2017 Mozilla, Apache License 2.0
+*/
+(function(f){if(true){module.exports=f()}else { var g; }})(function(){var define,module,exports;return (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return require(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw (f.code="MODULE_NOT_FOUND", f)}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(_dereq_,module,exports){
+(function (global){
+'use strict';
+var Mutation = global.MutationObserver || global.WebKitMutationObserver;
+
+var scheduleDrain;
+
+{
+ if (Mutation) {
+ var called = 0;
+ var observer = new Mutation(nextTick);
+ var element = global.document.createTextNode('');
+ observer.observe(element, {
+ characterData: true
+ });
+ scheduleDrain = function () {
+ element.data = (called = ++called % 2);
+ };
+ } else if (!global.setImmediate && typeof global.MessageChannel !== 'undefined') {
+ var channel = new global.MessageChannel();
+ channel.port1.onmessage = nextTick;
+ scheduleDrain = function () {
+ channel.port2.postMessage(0);
+ };
+ } else if ('document' in global && 'onreadystatechange' in global.document.createElement('script')) {
+ scheduleDrain = function () {
+ // Create a <script> element; its readystatechange event will be fired asynchronously once it is inserted
+ // into the document. Do so, thus queuing up the task. Remember to clean up once it's been called.
+ var scriptEl = global.document.createElement('script');
+ scriptEl.onreadystatechange = function () {
+ nextTick();
+
+ scriptEl.onreadystatechange = null;
+ scriptEl.parentNode.removeChild(scriptEl);
+ scriptEl = null;
+ };
+ global.document.documentElement.appendChild(scriptEl);
+ };
+ } else {
+ scheduleDrain = function () {
+ setTimeout(nextTick, 0);
+ };
+ }
+}
+
+var draining;
+var queue = [];
+// Named nextTick for less confusing stack traces.
+function nextTick() {
+ draining = true;
+ var i, oldQueue;
+ var len = queue.length;
+ while (len) {
+ oldQueue = queue;
+ queue = [];
+ i = -1;
+ while (++i < len) {
+ oldQueue[i]();
+ }
+ len = queue.length;
+ }
+ draining = false;
+}
+
+module.exports = immediate;
+function immediate(task) {
+ if (queue.push(task) === 1 && !draining) {
+ scheduleDrain();
+ }
+}
+
+}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
+},{}],2:[function(_dereq_,module,exports){
+'use strict';
+var immediate = _dereq_(1);
+
+/* istanbul ignore next */
+function INTERNAL() {}
+
+var handlers = {};
+
+var REJECTED = ['REJECTED'];
+var FULFILLED = ['FULFILLED'];
+var PENDING = ['PENDING'];
+
+module.exports = Promise;
+
+function Promise(resolver) {
+ if (typeof resolver !== 'function') {
+ throw new TypeError('resolver must be a function');
+ }
+ this.state = PENDING;
+ this.queue = [];
+ this.outcome = void 0;
+ if (resolver !== INTERNAL) {
+ safelyResolveThenable(this, resolver);
+ }
+}
+
+Promise.prototype["catch"] = function (onRejected) {
+ return this.then(null, onRejected);
+};
+Promise.prototype.then = function (onFulfilled, onRejected) {
+ if (typeof onFulfilled !== 'function' && this.state === FULFILLED ||
+ typeof onRejected !== 'function' && this.state === REJECTED) {
+ return this;
+ }
+ var promise = new this.constructor(INTERNAL);
+ if (this.state !== PENDING) {
+ var resolver = this.state === FULFILLED ? onFulfilled : onRejected;
+ unwrap(promise, resolver, this.outcome);
+ } else {
+ this.queue.push(new QueueItem(promise, onFulfilled, onRejected));
+ }
+
+ return promise;
+};
+function QueueItem(promise, onFulfilled, onRejected) {
+ this.promise = promise;
+ if (typeof onFulfilled === 'function') {
+ this.onFulfilled = onFulfilled;
+ this.callFulfilled = this.otherCallFulfilled;
+ }
+ if (typeof onRejected === 'function') {
+ this.onRejected = onRejected;
+ this.callRejected = this.otherCallRejected;
+ }
+}
+QueueItem.prototype.callFulfilled = function (value) {
+ handlers.resolve(this.promise, value);
+};
+QueueItem.prototype.otherCallFulfilled = function (value) {
+ unwrap(this.promise, this.onFulfilled, value);
+};
+QueueItem.prototype.callRejected = function (value) {
+ handlers.reject(this.promise, value);
+};
+QueueItem.prototype.otherCallRejected = function (value) {
+ unwrap(this.promise, this.onRejected, value);
+};
+
+function unwrap(promise, func, value) {
+ immediate(function () {
+ var returnValue;
+ try {
+ returnValue = func(value);
+ } catch (e) {
+ return handlers.reject(promise, e);
+ }
+ if (returnValue === promise) {
+ handlers.reject(promise, new TypeError('Cannot resolve promise with itself'));
+ } else {
+ handlers.resolve(promise, returnValue);
+ }
+ });
+}
+
+handlers.resolve = function (self, value) {
+ var result = tryCatch(getThen, value);
+ if (result.status === 'error') {
+ return handlers.reject(self, result.value);
+ }
+ var thenable = result.value;
+
+ if (thenable) {
+ safelyResolveThenable(self, thenable);
+ } else {
+ self.state = FULFILLED;
+ self.outcome = value;
+ var i = -1;
+ var len = self.queue.length;
+ while (++i < len) {
+ self.queue[i].callFulfilled(value);
+ }
+ }
+ return self;
+};
+handlers.reject = function (self, error) {
+ self.state = REJECTED;
+ self.outcome = error;
+ var i = -1;
+ var len = self.queue.length;
+ while (++i < len) {
+ self.queue[i].callRejected(error);
+ }
+ return self;
+};
+
+function getThen(obj) {
+ // Make sure we only access the accessor once as required by the spec
+ var then = obj && obj.then;
+ if (obj && (typeof obj === 'object' || typeof obj === 'function') && typeof then === 'function') {
+ return function applyThen() {
+ then.apply(obj, arguments);
+ };
+ }
+}
+
+function safelyResolveThenable(self, thenable) {
+ // Either fulfill, reject or reject with error
+ var called = false;
+ function onError(value) {
+ if (called) {
+ return;
+ }
+ called = true;
+ handlers.reject(self, value);
+ }
+
+ function onSuccess(value) {
+ if (called) {
+ return;
+ }
+ called = true;
+ handlers.resolve(self, value);
+ }
+
+ function tryToUnwrap() {
+ thenable(onSuccess, onError);
+ }
+
+ var result = tryCatch(tryToUnwrap);
+ if (result.status === 'error') {
+ onError(result.value);
+ }
+}
+
+function tryCatch(func, value) {
+ var out = {};
+ try {
+ out.value = func(value);
+ out.status = 'success';
+ } catch (e) {
+ out.status = 'error';
+ out.value = e;
+ }
+ return out;
+}
+
+Promise.resolve = resolve;
+function resolve(value) {
+ if (value instanceof this) {
+ return value;
+ }
+ return handlers.resolve(new this(INTERNAL), value);
+}
+
+Promise.reject = reject;
+function reject(reason) {
+ var promise = new this(INTERNAL);
+ return handlers.reject(promise, reason);
+}
+
+Promise.all = all;
+function all(iterable) {
+ var self = this;
+ if (Object.prototype.toString.call(iterable) !== '[object Array]') {
+ return this.reject(new TypeError('must be an array'));
+ }
+
+ var len = iterable.length;
+ var called = false;
+ if (!len) {
+ return this.resolve([]);
+ }
+
+ var values = new Array(len);
+ var resolved = 0;
+ var i = -1;
+ var promise = new this(INTERNAL);
+
+ while (++i < len) {
+ allResolver(iterable[i], i);
+ }
+ return promise;
+ function allResolver(value, i) {
+ self.resolve(value).then(resolveFromAll, function (error) {
+ if (!called) {
+ called = true;
+ handlers.reject(promise, error);
+ }
+ });
+ function resolveFromAll(outValue) {
+ values[i] = outValue;
+ if (++resolved === len && !called) {
+ called = true;
+ handlers.resolve(promise, values);
+ }
+ }
+ }
+}
+
+Promise.race = race;
+function race(iterable) {
+ var self = this;
+ if (Object.prototype.toString.call(iterable) !== '[object Array]') {
+ return this.reject(new TypeError('must be an array'));
+ }
+
+ var len = iterable.length;
+ var called = false;
+ if (!len) {
+ return this.resolve([]);
+ }
+
+ var i = -1;
+ var promise = new this(INTERNAL);
+
+ while (++i < len) {
+ resolver(iterable[i]);
+ }
+ return promise;
+ function resolver(value) {
+ self.resolve(value).then(function (response) {
+ if (!called) {
+ called = true;
+ handlers.resolve(promise, response);
+ }
+ }, function (error) {
+ if (!called) {
+ called = true;
+ handlers.reject(promise, error);
+ }
+ });
+ }
+}
+
+},{"1":1}],3:[function(_dereq_,module,exports){
+(function (global){
+'use strict';
+if (typeof global.Promise !== 'function') {
+ global.Promise = _dereq_(2);
+}
+
+}).call(this,typeof global !== "undefined" ? global : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : {})
+},{"2":2}],4:[function(_dereq_,module,exports){
+'use strict';
+
+var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; };
+
+function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
+
+function getIDB() {
+ /* global indexedDB,webkitIndexedDB,mozIndexedDB,OIndexedDB,msIndexedDB */
+ try {
+ if (typeof indexedDB !== 'undefined') {
+ return indexedDB;
+ }
+ if (typeof webkitIndexedDB !== 'undefined') {
+ return webkitIndexedDB;
+ }
+ if (typeof mozIndexedDB !== 'undefined') {
+ return mozIndexedDB;
+ }
+ if (typeof OIndexedDB !== 'undefined') {
+ return OIndexedDB;
+ }
+ if (typeof msIndexedDB !== 'undefined') {
+ return msIndexedDB;
+ }
+ } catch (e) {
+ return;
+ }
+}
+
+var idb = getIDB();
+
+function isIndexedDBValid() {
+ try {
+ // Initialize IndexedDB; fall back to vendor-prefixed versions
+ // if needed.
+ if (!idb) {
+ return false;
+ }
+ // We mimic PouchDB here;
+ //
+ // We test for openDatabase because IE Mobile identifies itself
+ // as Safari. Oh the lulz...
+ var isSafari = typeof openDatabase !== 'undefined' && /(Safari|iPhone|iPad|iPod)/.test(navigator.userAgent) && !/Chrome/.test(navigator.userAgent) && !/BlackBerry/.test(navigator.platform);
+
+ var hasFetch = typeof fetch === 'function' && fetch.toString().indexOf('[native code') !== -1;
+
+ // Safari <10.1 does not meet our requirements for IDB support (#5572)
+ // since Safari 10.1 shipped with fetch, we can use that to detect it
+ return (!isSafari || hasFetch) && typeof indexedDB !== 'undefined' &&
+ // some outdated implementations of IDB that appear on Samsung
+ // and HTC Android devices <4.4 are missing IDBKeyRange
+ // See: https://github.com/mozilla/localForage/issues/128
+ // See: https://github.com/mozilla/localForage/issues/272
+ typeof IDBKeyRange !== 'undefined';
+ } catch (e) {
+ return false;
+ }
+}
+
+// Abstracts constructing a Blob object, so it also works in older
+// browsers that don't support the native Blob constructor. (i.e.
+// old QtWebKit versions, at least).
+function createBlob(parts, properties) {
+ /* global BlobBuilder,MSBlobBuilder,MozBlobBuilder,WebKitBlobBuilder */
+ parts = parts || [];
+ properties = properties || {};
+ try {
+ return new Blob(parts, properties);
+ } catch (e) {
+ if (e.name !== 'TypeError') {
+ throw e;
+ }
+ var Builder = typeof BlobBuilder !== 'undefined' ? BlobBuilder : typeof MSBlobBuilder !== 'undefined' ? MSBlobBuilder : typeof MozBlobBuilder !== 'undefined' ? MozBlobBuilder : WebKitBlobBuilder;
+ var builder = new Builder();
+ for (var i = 0; i < parts.length; i += 1) {
+ builder.append(parts[i]);
+ }
+ return builder.getBlob(properties.type);
+ }
+}
+
+// This is CommonJS because lie is an external dependency, so Rollup
+// can just ignore it.
+if (typeof Promise === 'undefined') {
+ // In the "nopromises" build this will just throw if you don't have
+ // a global promise object, but it would throw anyway later.
+ _dereq_(3);
+}
+var Promise$1 = Promise;
+
+function executeCallback(promise, callback) {
+ if (callback) {
+ promise.then(function (result) {
+ callback(null, result);
+ }, function (error) {
+ callback(error);
+ });
+ }
+}
+
+function executeTwoCallbacks(promise, callback, errorCallback) {
+ if (typeof callback === 'function') {
+ promise.then(callback);
+ }
+
+ if (typeof errorCallback === 'function') {
+ promise["catch"](errorCallback);
+ }
+}
+
+function normalizeKey(key) {
+ // Cast the key to a string, as that's all we can set as a key.
+ if (typeof key !== 'string') {
+ console.warn(key + ' used as a key, but it is not a string.');
+ key = String(key);
+ }
+
+ return key;
+}
+
+function getCallback() {
+ if (arguments.length && typeof arguments[arguments.length - 1] === 'function') {
+ return arguments[arguments.length - 1];
+ }
+}
+
+// Some code originally from async_storage.js in
+// [Gaia](https://github.com/mozilla-b2g/gaia).
+
+var DETECT_BLOB_SUPPORT_STORE = 'local-forage-detect-blob-support';
+var supportsBlobs = void 0;
+var dbContexts = {};
+var toString = Object.prototype.toString;
+
+// Transaction Modes
+var READ_ONLY = 'readonly';
+var READ_WRITE = 'readwrite';
+
+// Transform a binary string to an array buffer, because otherwise
+// weird stuff happens when you try to work with the binary string directly.
+// It is known.
+// From http://stackoverflow.com/questions/14967647/ (continues on next line)
+// encode-decode-image-with-base64-breaks-image (2013-04-21)
+function _binStringToArrayBuffer(bin) {
+ var length = bin.length;
+ var buf = new ArrayBuffer(length);
+ var arr = new Uint8Array(buf);
+ for (var i = 0; i < length; i++) {
+ arr[i] = bin.charCodeAt(i);
+ }
+ return buf;
+}
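+// Usage sketch (illustration, not upstream code): _binStringToArrayBuffer('AB')
+// returns a 2-byte ArrayBuffer holding [65, 66] -- each charCode becomes one
+// byte, which is why this only round-trips binary strings (char codes <= 255).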
+
+//
+// Blobs are not supported in all versions of IndexedDB, notably
+// Chrome <37 and Android <5. In those versions, storing a blob will throw.
+//
+// Various other blob bugs exist in Chrome v37-42 (inclusive).
+// Detecting them is expensive and confusing to users, and Chrome 37-42
+// is at very low usage worldwide, so we do a hacky userAgent check instead.
+//
+// content-type bug: https://code.google.com/p/chromium/issues/detail?id=408120
+// 404 bug: https://code.google.com/p/chromium/issues/detail?id=447916
+// FileReader bug: https://code.google.com/p/chromium/issues/detail?id=447836
+//
+// Code borrowed from PouchDB. See:
+// https://github.com/pouchdb/pouchdb/blob/master/packages/node_modules/pouchdb-adapter-idb/src/blobSupport.js
+//
+function _checkBlobSupportWithoutCaching(idb) {
+ return new Promise$1(function (resolve) {
+ var txn = idb.transaction(DETECT_BLOB_SUPPORT_STORE, READ_WRITE);
+ var blob = createBlob(['']);
+ txn.objectStore(DETECT_BLOB_SUPPORT_STORE).put(blob, 'key');
+
+ txn.onabort = function (e) {
+ // If the transaction aborts now, it's because we couldn't
+ // write to the database, likely because the disk is full.
+ e.preventDefault();
+ e.stopPropagation();
+ resolve(false);
+ };
+
+ txn.oncomplete = function () {
+ var matchedChrome = navigator.userAgent.match(/Chrome\/(\d+)/);
+ var matchedEdge = navigator.userAgent.match(/Edge\//);
+ // MS Edge pretends to be Chrome 42:
+ // https://msdn.microsoft.com/en-us/library/hh869301%28v=vs.85%29.aspx
+ resolve(matchedEdge || !matchedChrome || parseInt(matchedChrome[1], 10) >= 43);
+ };
+ })["catch"](function () {
+ return false; // error, so assume unsupported
+ });
+}
+
+function _checkBlobSupport(idb) {
+ if (typeof supportsBlobs === 'boolean') {
+ return Promise$1.resolve(supportsBlobs);
+ }
+ return _checkBlobSupportWithoutCaching(idb).then(function (value) {
+ supportsBlobs = value;
+ return supportsBlobs;
+ });
+}
+
+function _deferReadiness(dbInfo) {
+ var dbContext = dbContexts[dbInfo.name];
+
+ // Create a deferred object representing the current database operation.
+ var deferredOperation = {};
+
+ deferredOperation.promise = new Promise$1(function (resolve, reject) {
+ deferredOperation.resolve = resolve;
+ deferredOperation.reject = reject;
+ });
+
+ // Enqueue the deferred operation.
+ dbContext.deferredOperations.push(deferredOperation);
+
+ // Chain its promise to the database readiness.
+ if (!dbContext.dbReady) {
+ dbContext.dbReady = deferredOperation.promise;
+ } else {
+ dbContext.dbReady = dbContext.dbReady.then(function () {
+ return deferredOperation.promise;
+ });
+ }
+}
+
+function _advanceReadiness(dbInfo) {
+ var dbContext = dbContexts[dbInfo.name];
+
+ // Dequeue a deferred operation.
+ var deferredOperation = dbContext.deferredOperations.pop();
+
+ // Resolve its promise (which is part of the database readiness
+ // chain of promises).
+ if (deferredOperation) {
+ deferredOperation.resolve();
+ return deferredOperation.promise;
+ }
+}
+
+function _rejectReadiness(dbInfo, err) {
+ var dbContext = dbContexts[dbInfo.name];
+
+ // Dequeue a deferred operation.
+ var deferredOperation = dbContext.deferredOperations.pop();
+
+ // Reject its promise (which is part of the database readiness
+ // chain of promises).
+ if (deferredOperation) {
+ deferredOperation.reject(err);
+ return deferredOperation.promise;
+ }
+}
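+
+// Note on the three helpers above (descriptive, not part of the library):
+// _deferReadiness parks a pending operation on dbContext.deferredOperations
+// and chains its promise onto dbContext.dbReady; _advanceReadiness and
+// _rejectReadiness later settle the most recently queued operation,
+// releasing anything waiting on the readiness chain.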
+
+function _getConnection(dbInfo, upgradeNeeded) {
+ return new Promise$1(function (resolve, reject) {
+ dbContexts[dbInfo.name] = dbContexts[dbInfo.name] || createDbContext();
+
+ if (dbInfo.db) {
+ if (upgradeNeeded) {
+ _deferReadiness(dbInfo);
+ dbInfo.db.close();
+ } else {
+ return resolve(dbInfo.db);
+ }
+ }
+
+ var dbArgs = [dbInfo.name];
+
+ if (upgradeNeeded) {
+ dbArgs.push(dbInfo.version);
+ }
+
+ var openreq = idb.open.apply(idb, dbArgs);
+
+ if (upgradeNeeded) {
+ openreq.onupgradeneeded = function (e) {
+ var db = openreq.result;
+ try {
+ db.createObjectStore(dbInfo.storeName);
+ if (e.oldVersion <= 1) {
+ // Added when support for blob shims was added
+ db.createObjectStore(DETECT_BLOB_SUPPORT_STORE);
+ }
+ } catch (ex) {
+ if (ex.name === 'ConstraintError') {
+ console.warn('The database "' + dbInfo.name + '"' + ' has been upgraded from version ' + e.oldVersion + ' to version ' + e.newVersion + ', but the storage "' + dbInfo.storeName + '" already exists.');
+ } else {
+ throw ex;
+ }
+ }
+ };
+ }
+
+ openreq.onerror = function (e) {
+ e.preventDefault();
+ reject(openreq.error);
+ };
+
+ openreq.onsuccess = function () {
+ resolve(openreq.result);
+ _advanceReadiness(dbInfo);
+ };
+ });
+}
+
+function _getOriginalConnection(dbInfo) {
+ return _getConnection(dbInfo, false);
+}
+
+function _getUpgradedConnection(dbInfo) {
+ return _getConnection(dbInfo, true);
+}
+
+function _isUpgradeNeeded(dbInfo, defaultVersion) {
+ if (!dbInfo.db) {
+ return true;
+ }
+
+ var isNewStore = !dbInfo.db.objectStoreNames.contains(dbInfo.storeName);
+ var isDowngrade = dbInfo.version < dbInfo.db.version;
+ var isUpgrade = dbInfo.version > dbInfo.db.version;
+
+ if (isDowngrade) {
+ // If the version is not the default one
+ // then warn for impossible downgrade.
+ if (dbInfo.version !== defaultVersion) {
+ console.warn('The database "' + dbInfo.name + '"' + " can't be downgraded from version " + dbInfo.db.version + ' to version ' + dbInfo.version + '.');
+ }
+ // Align the versions to prevent errors.
+ dbInfo.version = dbInfo.db.version;
+ }
+
+ if (isUpgrade || isNewStore) {
+ // If the store is new then increment the version (if needed).
+ // This will trigger an "upgradeneeded" event which is required
+ // for creating a store.
+ if (isNewStore) {
+ var incVersion = dbInfo.db.version + 1;
+ if (incVersion > dbInfo.version) {
+ dbInfo.version = incVersion;
+ }
+ }
+
+ return true;
+ }
+
+ return false;
+}
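+
+// Worked example of the logic above (values are illustrative): with an
+// open db at version 2 that lacks the requested store, isNewStore is true,
+// so dbInfo.version is bumped to max(requested, db.version + 1) and the
+// caller reopens the connection, firing "upgradeneeded" so the store can
+// be created.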
+
+// encode a blob for indexeddb engines that don't support blobs
+function _encodeBlob(blob) {
+ return new Promise$1(function (resolve, reject) {
+ var reader = new FileReader();
+ reader.onerror = reject;
+ reader.onloadend = function (e) {
+ var base64 = btoa(e.target.result || '');
+ resolve({
+ __local_forage_encoded_blob: true,
+ data: base64,
+ type: blob.type
+ });
+ };
+ reader.readAsBinaryString(blob);
+ });
+}
+
+// decode an encoded blob
+function _decodeBlob(encodedBlob) {
+ var arrayBuff = _binStringToArrayBuffer(atob(encodedBlob.data));
+ return createBlob([arrayBuff], { type: encodedBlob.type });
+}
+
+// is this one of our fancy encoded blobs?
+function _isEncodedBlob(value) {
+ return value && value.__local_forage_encoded_blob;
+}
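+
+// Round-trip sketch for the blob shim above (illustrative; assumes a
+// browser environment with FileReader/btoa/atob):
+//
+//   _encodeBlob(createBlob(['hi'], { type: 'text/plain' })).then(function (e) {
+//     // e => { __local_forage_encoded_blob: true, data: 'aGk=', type: 'text/plain' }
+//     var blob = _decodeBlob(e); // back to a real Blob
+//   });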
+
+// Specialize the default `ready()` function by making it dependent
+// on the current database operations. Thus, the driver will actually be
+// ready when it's been initialized (default) *and* there are no pending
+// operations on the database (initiated by other instances).
+function _fullyReady(callback) {
+ var self = this;
+
+ var promise = self._initReady().then(function () {
+ var dbContext = dbContexts[self._dbInfo.name];
+
+ if (dbContext && dbContext.dbReady) {
+ return dbContext.dbReady;
+ }
+ });
+
+ executeTwoCallbacks(promise, callback, callback);
+ return promise;
+}
+
+// Try to establish a new db connection to replace the
+// current one which is broken (i.e. experiencing
+// InvalidStateError while creating a transaction).
+function _tryReconnect(dbInfo) {
+ _deferReadiness(dbInfo);
+
+ var dbContext = dbContexts[dbInfo.name];
+ var forages = dbContext.forages;
+
+ for (var i = 0; i < forages.length; i++) {
+ var forage = forages[i];
+ if (forage._dbInfo.db) {
+ forage._dbInfo.db.close();
+ forage._dbInfo.db = null;
+ }
+ }
+ dbInfo.db = null;
+
+ return _getOriginalConnection(dbInfo).then(function (db) {
+ dbInfo.db = db;
+ if (_isUpgradeNeeded(dbInfo)) {
+ // Reopen the database for upgrading.
+ return _getUpgradedConnection(dbInfo);
+ }
+ return db;
+ }).then(function (db) {
+ // store the latest db reference
+ // in case the db was upgraded
+ dbInfo.db = dbContext.db = db;
+ for (var i = 0; i < forages.length; i++) {
+ forages[i]._dbInfo.db = db;
+ }
+ })["catch"](function (err) {
+ _rejectReadiness(dbInfo, err);
+ throw err;
+ });
+}
+
+// Firefox doesn't like mixing Promises (micro-tasks) with IndexedDB
+// store operations, so we have to do this with callbacks.
+function createTransaction(dbInfo, mode, callback, retries) {
+ if (retries === undefined) {
+ retries = 1;
+ }
+
+ try {
+ var tx = dbInfo.db.transaction(dbInfo.storeName, mode);
+ callback(null, tx);
+ } catch (err) {
+ if (retries > 0 && (!dbInfo.db || err.name === 'InvalidStateError' || err.name === 'NotFoundError')) {
+ return Promise$1.resolve().then(function () {
+ if (!dbInfo.db || err.name === 'NotFoundError' && !dbInfo.db.objectStoreNames.contains(dbInfo.storeName) && dbInfo.version <= dbInfo.db.version) {
+ // increase the db version, to create the new ObjectStore
+ if (dbInfo.db) {
+ dbInfo.version = dbInfo.db.version + 1;
+ }
+ // Reopen the database for upgrading.
+ return _getUpgradedConnection(dbInfo);
+ }
+ }).then(function () {
+ return _tryReconnect(dbInfo).then(function () {
+ createTransaction(dbInfo, mode, callback, retries - 1);
+ });
+ })["catch"](callback);
+ }
+
+ callback(err);
+ }
+}
+
+function createDbContext() {
+ return {
+ // Running localForages sharing a database.
+ forages: [],
+ // Shared database.
+ db: null,
+ // Database readiness (promise).
+ dbReady: null,
+ // Deferred operations on the database.
+ deferredOperations: []
+ };
+}
+
+// Open the IndexedDB database (automatically creates one if one didn't
+// previously exist), using any options set in the config.
+function _initStorage(options) {
+ var self = this;
+ var dbInfo = {
+ db: null
+ };
+
+ if (options) {
+ for (var i in options) {
+ dbInfo[i] = options[i];
+ }
+ }
+
+ // Get the current context of the database;
+ var dbContext = dbContexts[dbInfo.name];
+
+ // ...or create a new context.
+ if (!dbContext) {
+ dbContext = createDbContext();
+ // Register the new context in the global container.
+ dbContexts[dbInfo.name] = dbContext;
+ }
+
+ // Register itself as a running localForage in the current context.
+ dbContext.forages.push(self);
+
+ // Replace the default `ready()` function with the specialized one.
+ if (!self._initReady) {
+ self._initReady = self.ready;
+ self.ready = _fullyReady;
+ }
+
+ // Create an array of initialization states of the related localForages.
+ var initPromises = [];
+
+ function ignoreErrors() {
+ // Don't handle errors here;
+ // this just makes sure related localForages aren't pending.
+ return Promise$1.resolve();
+ }
+
+ for (var j = 0; j < dbContext.forages.length; j++) {
+ var forage = dbContext.forages[j];
+ if (forage !== self) {
+ // Don't wait for itself...
+ initPromises.push(forage._initReady()["catch"](ignoreErrors));
+ }
+ }
+
+ // Take a snapshot of the related localForages.
+ var forages = dbContext.forages.slice(0);
+
+ // Initialize the connection process only when
+ // all the related localForages aren't pending.
+ return Promise$1.all(initPromises).then(function () {
+ dbInfo.db = dbContext.db;
+ // Get the connection or open a new one without upgrade.
+ return _getOriginalConnection(dbInfo);
+ }).then(function (db) {
+ dbInfo.db = db;
+ if (_isUpgradeNeeded(dbInfo, self._defaultConfig.version)) {
+ // Reopen the database for upgrading.
+ return _getUpgradedConnection(dbInfo);
+ }
+ return db;
+ }).then(function (db) {
+ dbInfo.db = dbContext.db = db;
+ self._dbInfo = dbInfo;
+ // Share the final connection amongst related localForages.
+ for (var k = 0; k < forages.length; k++) {
+ var forage = forages[k];
+ if (forage !== self) {
+ // Self is already up-to-date.
+ forage._dbInfo.db = dbInfo.db;
+ forage._dbInfo.version = dbInfo.version;
+ }
+ }
+ });
+}
+
+function getItem(key, callback) {
+ var self = this;
+
+ key = normalizeKey(key);
+
+ var promise = new Promise$1(function (resolve, reject) {
+ self.ready().then(function () {
+ createTransaction(self._dbInfo, READ_ONLY, function (err, transaction) {
+ if (err) {
+ return reject(err);
+ }
+
+ try {
+ var store = transaction.objectStore(self._dbInfo.storeName);
+ var req = store.get(key);
+
+ req.onsuccess = function () {
+ var value = req.result;
+ if (value === undefined) {
+ value = null;
+ }
+ if (_isEncodedBlob(value)) {
+ value = _decodeBlob(value);
+ }
+ resolve(value);
+ };
+
+ req.onerror = function () {
+ reject(req.error);
+ };
+ } catch (e) {
+ reject(e);
+ }
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+// Iterate over all items stored in database.
+function iterate(iterator, callback) {
+ var self = this;
+
+ var promise = new Promise$1(function (resolve, reject) {
+ self.ready().then(function () {
+ createTransaction(self._dbInfo, READ_ONLY, function (err, transaction) {
+ if (err) {
+ return reject(err);
+ }
+
+ try {
+ var store = transaction.objectStore(self._dbInfo.storeName);
+ var req = store.openCursor();
+ var iterationNumber = 1;
+
+ req.onsuccess = function () {
+ var cursor = req.result;
+
+ if (cursor) {
+ var value = cursor.value;
+ if (_isEncodedBlob(value)) {
+ value = _decodeBlob(value);
+ }
+ var result = iterator(value, cursor.key, iterationNumber++);
+
+ // when the iterator callback returns any
+ // (non-`undefined`) value, we stop
+ // the iteration immediately
+ if (result !== void 0) {
+ resolve(result);
+ } else {
+ cursor["continue"]();
+ }
+ } else {
+ resolve();
+ }
+ };
+
+ req.onerror = function () {
+ reject(req.error);
+ };
+ } catch (e) {
+ reject(e);
+ }
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+
+ return promise;
+}
+
+function setItem(key, value, callback) {
+ var self = this;
+
+ key = normalizeKey(key);
+
+ var promise = new Promise$1(function (resolve, reject) {
+ var dbInfo;
+ self.ready().then(function () {
+ dbInfo = self._dbInfo;
+ if (toString.call(value) === '[object Blob]') {
+ return _checkBlobSupport(dbInfo.db).then(function (blobSupport) {
+ if (blobSupport) {
+ return value;
+ }
+ return _encodeBlob(value);
+ });
+ }
+ return value;
+ }).then(function (value) {
+ createTransaction(self._dbInfo, READ_WRITE, function (err, transaction) {
+ if (err) {
+ return reject(err);
+ }
+
+ try {
+ var store = transaction.objectStore(self._dbInfo.storeName);
+
+ // The reason we don't _save_ null is because IE 10 does
+ // not support saving the `null` type in IndexedDB. How
+ // ironic, given the bug below!
+ // See: https://github.com/mozilla/localForage/issues/161
+ if (value === null) {
+ value = undefined;
+ }
+
+ var req = store.put(value, key);
+
+ transaction.oncomplete = function () {
+ // Cast to undefined so the value passed to
+ // callback/promise is the same as what one would get out
+ // of `getItem()` later. This leads to some weirdness
+ // (setItem('foo', undefined) will return `null`), but
+ // it's not my fault localStorage is our baseline and that
+ // it's weird.
+ if (value === undefined) {
+ value = null;
+ }
+
+ resolve(value);
+ };
+ transaction.onabort = transaction.onerror = function () {
+ var err = req.error ? req.error : req.transaction.error;
+ reject(err);
+ };
+ } catch (e) {
+ reject(e);
+ }
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+function removeItem(key, callback) {
+ var self = this;
+
+ key = normalizeKey(key);
+
+ var promise = new Promise$1(function (resolve, reject) {
+ self.ready().then(function () {
+ createTransaction(self._dbInfo, READ_WRITE, function (err, transaction) {
+ if (err) {
+ return reject(err);
+ }
+
+ try {
+ var store = transaction.objectStore(self._dbInfo.storeName);
+ // We use a Grunt task to make this safe for IE and some
+ // versions of Android (including those used by Cordova).
+ // Normally IE won't like `.delete()` and will insist on
+ // using `['delete']()`, but we have a build step that
+ // fixes this for us now.
+ var req = store["delete"](key);
+ transaction.oncomplete = function () {
+ resolve();
+ };
+
+ transaction.onerror = function () {
+ reject(req.error);
+ };
+
+ // The transaction will also be aborted if we've exceeded our
+ // storage space.
+ transaction.onabort = function () {
+ var err = req.error ? req.error : req.transaction.error;
+ reject(err);
+ };
+ } catch (e) {
+ reject(e);
+ }
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+function clear(callback) {
+ var self = this;
+
+ var promise = new Promise$1(function (resolve, reject) {
+ self.ready().then(function () {
+ createTransaction(self._dbInfo, READ_WRITE, function (err, transaction) {
+ if (err) {
+ return reject(err);
+ }
+
+ try {
+ var store = transaction.objectStore(self._dbInfo.storeName);
+ var req = store.clear();
+
+ transaction.oncomplete = function () {
+ resolve();
+ };
+
+ transaction.onabort = transaction.onerror = function () {
+ var err = req.error ? req.error : req.transaction.error;
+ reject(err);
+ };
+ } catch (e) {
+ reject(e);
+ }
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+function length(callback) {
+ var self = this;
+
+ var promise = new Promise$1(function (resolve, reject) {
+ self.ready().then(function () {
+ createTransaction(self._dbInfo, READ_ONLY, function (err, transaction) {
+ if (err) {
+ return reject(err);
+ }
+
+ try {
+ var store = transaction.objectStore(self._dbInfo.storeName);
+ var req = store.count();
+
+ req.onsuccess = function () {
+ resolve(req.result);
+ };
+
+ req.onerror = function () {
+ reject(req.error);
+ };
+ } catch (e) {
+ reject(e);
+ }
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+function key(n, callback) {
+ var self = this;
+
+ var promise = new Promise$1(function (resolve, reject) {
+ if (n < 0) {
+ resolve(null);
+
+ return;
+ }
+
+ self.ready().then(function () {
+ createTransaction(self._dbInfo, READ_ONLY, function (err, transaction) {
+ if (err) {
+ return reject(err);
+ }
+
+ try {
+ var store = transaction.objectStore(self._dbInfo.storeName);
+ var advanced = false;
+ var req = store.openCursor();
+
+ req.onsuccess = function () {
+ var cursor = req.result;
+ if (!cursor) {
+ // this means there weren't enough keys
+ resolve(null);
+
+ return;
+ }
+
+ if (n === 0) {
+ // We have the first key, return it if that's what they
+ // wanted.
+ resolve(cursor.key);
+ } else {
+ if (!advanced) {
+ // Otherwise, ask the cursor to skip ahead n
+ // records.
+ advanced = true;
+ cursor.advance(n);
+ } else {
+ // When we get here, we've got the nth key.
+ resolve(cursor.key);
+ }
+ }
+ };
+
+ req.onerror = function () {
+ reject(req.error);
+ };
+ } catch (e) {
+ reject(e);
+ }
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+function keys(callback) {
+ var self = this;
+
+ var promise = new Promise$1(function (resolve, reject) {
+ self.ready().then(function () {
+ createTransaction(self._dbInfo, READ_ONLY, function (err, transaction) {
+ if (err) {
+ return reject(err);
+ }
+
+ try {
+ var store = transaction.objectStore(self._dbInfo.storeName);
+ var req = store.openCursor();
+ var keys = [];
+
+ req.onsuccess = function () {
+ var cursor = req.result;
+
+ if (!cursor) {
+ resolve(keys);
+ return;
+ }
+
+ keys.push(cursor.key);
+ cursor["continue"]();
+ };
+
+ req.onerror = function () {
+ reject(req.error);
+ };
+ } catch (e) {
+ reject(e);
+ }
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+function dropInstance(options, callback) {
+ callback = getCallback.apply(this, arguments);
+
+ var currentConfig = this.config();
+ options = typeof options !== 'function' && options || {};
+ if (!options.name) {
+ options.name = options.name || currentConfig.name;
+ options.storeName = options.storeName || currentConfig.storeName;
+ }
+
+ var self = this;
+ var promise;
+ if (!options.name) {
+ promise = Promise$1.reject('Invalid arguments');
+ } else {
+ var isCurrentDb = options.name === currentConfig.name && self._dbInfo.db;
+
+ var dbPromise = isCurrentDb ? Promise$1.resolve(self._dbInfo.db) : _getOriginalConnection(options).then(function (db) {
+ var dbContext = dbContexts[options.name];
+ var forages = dbContext.forages;
+ dbContext.db = db;
+ for (var i = 0; i < forages.length; i++) {
+ forages[i]._dbInfo.db = db;
+ }
+ return db;
+ });
+
+ if (!options.storeName) {
+ promise = dbPromise.then(function (db) {
+ _deferReadiness(options);
+
+ var dbContext = dbContexts[options.name];
+ var forages = dbContext.forages;
+
+ db.close();
+ for (var i = 0; i < forages.length; i++) {
+ var forage = forages[i];
+ forage._dbInfo.db = null;
+ }
+
+ var dropDBPromise = new Promise$1(function (resolve, reject) {
+ var req = idb.deleteDatabase(options.name);
+
+ req.onerror = req.onblocked = function (err) {
+ var db = req.result;
+ if (db) {
+ db.close();
+ }
+ reject(err);
+ };
+
+ req.onsuccess = function () {
+ var db = req.result;
+ if (db) {
+ db.close();
+ }
+ resolve(db);
+ };
+ });
+
+ return dropDBPromise.then(function (db) {
+ dbContext.db = db;
+ for (var i = 0; i < forages.length; i++) {
+ var _forage = forages[i];
+ _advanceReadiness(_forage._dbInfo);
+ }
+ })["catch"](function (err) {
+ (_rejectReadiness(options, err) || Promise$1.resolve())["catch"](function () {});
+ throw err;
+ });
+ });
+ } else {
+ promise = dbPromise.then(function (db) {
+ if (!db.objectStoreNames.contains(options.storeName)) {
+ return;
+ }
+
+ var newVersion = db.version + 1;
+
+ _deferReadiness(options);
+
+ var dbContext = dbContexts[options.name];
+ var forages = dbContext.forages;
+
+ db.close();
+ for (var i = 0; i < forages.length; i++) {
+ var forage = forages[i];
+ forage._dbInfo.db = null;
+ forage._dbInfo.version = newVersion;
+ }
+
+ var dropObjectPromise = new Promise$1(function (resolve, reject) {
+ var req = idb.open(options.name, newVersion);
+
+ req.onerror = function (err) {
+ var db = req.result;
+ db.close();
+ reject(err);
+ };
+
+ req.onupgradeneeded = function () {
+ var db = req.result;
+ db.deleteObjectStore(options.storeName);
+ };
+
+ req.onsuccess = function () {
+ var db = req.result;
+ db.close();
+ resolve(db);
+ };
+ });
+
+ return dropObjectPromise.then(function (db) {
+ dbContext.db = db;
+ for (var j = 0; j < forages.length; j++) {
+ var _forage2 = forages[j];
+ _forage2._dbInfo.db = db;
+ _advanceReadiness(_forage2._dbInfo);
+ }
+ })["catch"](function (err) {
+ (_rejectReadiness(options, err) || Promise$1.resolve())["catch"](function () {});
+ throw err;
+ });
+ });
+ }
+ }
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+var asyncStorage = {
+ _driver: 'asyncStorage',
+ _initStorage: _initStorage,
+ _support: isIndexedDBValid(),
+ iterate: iterate,
+ getItem: getItem,
+ setItem: setItem,
+ removeItem: removeItem,
+ clear: clear,
+ length: length,
+ key: key,
+ keys: keys,
+ dropInstance: dropInstance
+};
+
+function isWebSQLValid() {
+ return typeof openDatabase === 'function';
+}
+
+// Sadly, the best way to save binary data in WebSQL/localStorage is to
+// serialize it to Base64, so this is how we store it, to prevent the very
+// strange errors that come with less verbose ways of binary <-> string
+// data storage.
+var BASE_CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+
+var BLOB_TYPE_PREFIX = '~~local_forage_type~';
+var BLOB_TYPE_PREFIX_REGEX = /^~~local_forage_type~([^~]+)~/;
+
+var SERIALIZED_MARKER = '__lfsc__:';
+var SERIALIZED_MARKER_LENGTH = SERIALIZED_MARKER.length;
+
+// OMG the serializations!
+var TYPE_ARRAYBUFFER = 'arbf';
+var TYPE_BLOB = 'blob';
+var TYPE_INT8ARRAY = 'si08';
+var TYPE_UINT8ARRAY = 'ui08';
+var TYPE_UINT8CLAMPEDARRAY = 'uic8';
+var TYPE_INT16ARRAY = 'si16';
+var TYPE_INT32ARRAY = 'si32';
+var TYPE_UINT16ARRAY = 'ur16';
+var TYPE_UINT32ARRAY = 'ui32';
+var TYPE_FLOAT32ARRAY = 'fl32';
+var TYPE_FLOAT64ARRAY = 'fl64';
+var TYPE_SERIALIZED_MARKER_LENGTH = SERIALIZED_MARKER_LENGTH + TYPE_ARRAYBUFFER.length;
+
+var toString$1 = Object.prototype.toString;
+
+function stringToBuffer(serializedString) {
+ // Decode the Base64 string into an ArrayBuffer.
+ var bufferLength = serializedString.length * 0.75;
+ var len = serializedString.length;
+ var i;
+ var p = 0;
+ var encoded1, encoded2, encoded3, encoded4;
+
+ if (serializedString[serializedString.length - 1] === '=') {
+ bufferLength--;
+ if (serializedString[serializedString.length - 2] === '=') {
+ bufferLength--;
+ }
+ }
+
+ var buffer = new ArrayBuffer(bufferLength);
+ var bytes = new Uint8Array(buffer);
+
+ for (i = 0; i < len; i += 4) {
+ encoded1 = BASE_CHARS.indexOf(serializedString[i]);
+ encoded2 = BASE_CHARS.indexOf(serializedString[i + 1]);
+ encoded3 = BASE_CHARS.indexOf(serializedString[i + 2]);
+ encoded4 = BASE_CHARS.indexOf(serializedString[i + 3]);
+
+ /*jslint bitwise: true */
+ bytes[p++] = encoded1 << 2 | encoded2 >> 4;
+ bytes[p++] = (encoded2 & 15) << 4 | encoded3 >> 2;
+ bytes[p++] = (encoded3 & 3) << 6 | encoded4 & 63;
+ }
+ return buffer;
+}
+
+// Converts a buffer to a Base64 string so it can be stored, serialized,
+// in the backing storage library.
+function bufferToString(buffer) {
+ // base64-arraybuffer
+ var bytes = new Uint8Array(buffer);
+ var base64String = '';
+ var i;
+
+ for (i = 0; i < bytes.length; i += 3) {
+ /*jslint bitwise: true */
+ base64String += BASE_CHARS[bytes[i] >> 2];
+ base64String += BASE_CHARS[(bytes[i] & 3) << 4 | bytes[i + 1] >> 4];
+ base64String += BASE_CHARS[(bytes[i + 1] & 15) << 2 | bytes[i + 2] >> 6];
+ base64String += BASE_CHARS[bytes[i + 2] & 63];
+ }
+
+ if (bytes.length % 3 === 2) {
+ base64String = base64String.substring(0, base64String.length - 1) + '=';
+ } else if (bytes.length % 3 === 1) {
+ base64String = base64String.substring(0, base64String.length - 2) + '==';
+ }
+
+ return base64String;
+}
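+
+// Round-trip sketch for the two helpers above (illustrative):
+//
+//   var buf = new Uint8Array([104, 105]).buffer;   // bytes of 'hi'
+//   var s = bufferToString(buf);                   // 'aGk='
+//   new Uint8Array(stringToBuffer(s));             // Uint8Array [104, 105]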
+
+// Serialize a value, afterwards executing a callback (which usually
+// instructs the `setItem()` callback/promise to be executed). This is how
+// we store binary data with localStorage.
+function serialize(value, callback) {
+ var valueType = '';
+ if (value) {
+ valueType = toString$1.call(value);
+ }
+
+ // Cannot use `value instanceof ArrayBuffer` or such here, as these
+ // checks fail when running the tests using casper.js...
+ //
+ // TODO: See why those tests fail and use a better solution.
+ if (value && (valueType === '[object ArrayBuffer]' || value.buffer && toString$1.call(value.buffer) === '[object ArrayBuffer]')) {
+ // Convert binary arrays to a string and prefix the string with
+ // a special marker.
+ var buffer;
+ var marker = SERIALIZED_MARKER;
+
+ if (value instanceof ArrayBuffer) {
+ buffer = value;
+ marker += TYPE_ARRAYBUFFER;
+ } else {
+ buffer = value.buffer;
+
+ if (valueType === '[object Int8Array]') {
+ marker += TYPE_INT8ARRAY;
+ } else if (valueType === '[object Uint8Array]') {
+ marker += TYPE_UINT8ARRAY;
+ } else if (valueType === '[object Uint8ClampedArray]') {
+ marker += TYPE_UINT8CLAMPEDARRAY;
+ } else if (valueType === '[object Int16Array]') {
+ marker += TYPE_INT16ARRAY;
+ } else if (valueType === '[object Uint16Array]') {
+ marker += TYPE_UINT16ARRAY;
+ } else if (valueType === '[object Int32Array]') {
+ marker += TYPE_INT32ARRAY;
+ } else if (valueType === '[object Uint32Array]') {
+ marker += TYPE_UINT32ARRAY;
+ } else if (valueType === '[object Float32Array]') {
+ marker += TYPE_FLOAT32ARRAY;
+ } else if (valueType === '[object Float64Array]') {
+ marker += TYPE_FLOAT64ARRAY;
+ } else {
+ callback(new Error('Failed to get type for BinaryArray'));
+ }
+ }
+
+ callback(marker + bufferToString(buffer));
+ } else if (valueType === '[object Blob]') {
+ // Convert the blob to a binary array and then to a string.
+ var fileReader = new FileReader();
+
+ fileReader.onload = function () {
+ // Backwards-compatible prefix for the blob type.
+ var str = BLOB_TYPE_PREFIX + value.type + '~' + bufferToString(this.result);
+
+ callback(SERIALIZED_MARKER + TYPE_BLOB + str);
+ };
+
+ fileReader.readAsArrayBuffer(value);
+ } else {
+ try {
+ callback(JSON.stringify(value));
+ } catch (e) {
+ console.error("Couldn't convert value into a JSON string: ", value);
+
+ callback(null, e);
+ }
+ }
+}
+
+// Deserialize data we've inserted into a value column/field. We place
+// special markers into our strings to mark them as encoded; this isn't
+// as nice as a meta field, but it's the only sane thing we can do whilst
+// keeping localStorage support intact.
+//
+// Oftentimes this will just deserialize JSON content, but if we have a
+// special marker (SERIALIZED_MARKER, defined above), we will extract
+// some kind of arraybuffer/binary data/typed array out of the string.
+function deserialize(value) {
+ // If we haven't marked this string as being specially serialized (i.e.
+ // something other than serialized JSON), we can just return it and be
+ // done with it.
+ if (value.substring(0, SERIALIZED_MARKER_LENGTH) !== SERIALIZED_MARKER) {
+ return JSON.parse(value);
+ }
+
+ // The following code deals with deserializing some kind of Blob or
+ // TypedArray. First we separate out the type of data we're dealing
+ // with from the data itself.
+ var serializedString = value.substring(TYPE_SERIALIZED_MARKER_LENGTH);
+ var type = value.substring(SERIALIZED_MARKER_LENGTH, TYPE_SERIALIZED_MARKER_LENGTH);
+
+ var blobType;
+ // Backwards-compatible blob type serialization strategy.
+ // DBs created with older versions of localForage will simply not have the blob type.
+ if (type === TYPE_BLOB && BLOB_TYPE_PREFIX_REGEX.test(serializedString)) {
+ var matcher = serializedString.match(BLOB_TYPE_PREFIX_REGEX);
+ blobType = matcher[1];
+ serializedString = serializedString.substring(matcher[0].length);
+ }
+ var buffer = stringToBuffer(serializedString);
+
+ // Return the right type based on the code/type set during
+ // serialization.
+ switch (type) {
+ case TYPE_ARRAYBUFFER:
+ return buffer;
+ case TYPE_BLOB:
+ return createBlob([buffer], { type: blobType });
+ case TYPE_INT8ARRAY:
+ return new Int8Array(buffer);
+ case TYPE_UINT8ARRAY:
+ return new Uint8Array(buffer);
+ case TYPE_UINT8CLAMPEDARRAY:
+ return new Uint8ClampedArray(buffer);
+ case TYPE_INT16ARRAY:
+ return new Int16Array(buffer);
+ case TYPE_UINT16ARRAY:
+ return new Uint16Array(buffer);
+ case TYPE_INT32ARRAY:
+ return new Int32Array(buffer);
+ case TYPE_UINT32ARRAY:
+ return new Uint32Array(buffer);
+ case TYPE_FLOAT32ARRAY:
+ return new Float32Array(buffer);
+ case TYPE_FLOAT64ARRAY:
+ return new Float64Array(buffer);
+ default:
+ throw new Error('Unknown type: ' + type);
+ }
+}
+
+var localforageSerializer = {
+ serialize: serialize,
+ deserialize: deserialize,
+ stringToBuffer: stringToBuffer,
+ bufferToString: bufferToString
+};
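+
+// Sketch of the on-disk string format the serializer produces (values
+// are illustrative):
+//
+//   serialize(new Uint8Array([104, 105]), function (s) {
+//     // s === '__lfsc__:ui08aGk='  (marker + type code + Base64 payload)
+//   });
+//   deserialize('__lfsc__:ui08aGk='); // Uint8Array [104, 105]
+//
+//   serialize({ a: 1 }, function (s) {
+//     // plain values are just JSON: s === '{"a":1}'
+//   });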
+
+/*
+ * Includes code from:
+ *
+ * base64-arraybuffer
+ * https://github.com/niklasvh/base64-arraybuffer
+ *
+ * Copyright (c) 2012 Niklas von Hertzen
+ * Licensed under the MIT license.
+ */
+
+function createDbTable(t, dbInfo, callback, errorCallback) {
+ t.executeSql('CREATE TABLE IF NOT EXISTS ' + dbInfo.storeName + ' ' + '(id INTEGER PRIMARY KEY, key unique, value)', [], callback, errorCallback);
+}
+
+// Open the WebSQL database (automatically creates one if one didn't
+// previously exist), using any options set in the config.
+function _initStorage$1(options) {
+ var self = this;
+ var dbInfo = {
+ db: null
+ };
+
+ if (options) {
+ for (var i in options) {
+ dbInfo[i] = typeof options[i] !== 'string' ? options[i].toString() : options[i];
+ }
+ }
+
+ var dbInfoPromise = new Promise$1(function (resolve, reject) {
+ // Open the database; the openDatabase API will automatically
+ // create it for us if it doesn't exist.
+ try {
+ dbInfo.db = openDatabase(dbInfo.name, String(dbInfo.version), dbInfo.description, dbInfo.size);
+ } catch (e) {
+ return reject(e);
+ }
+
+ // Create our key/value table if it doesn't exist.
+ dbInfo.db.transaction(function (t) {
+ createDbTable(t, dbInfo, function () {
+ self._dbInfo = dbInfo;
+ resolve();
+ }, function (t, error) {
+ reject(error);
+ });
+ }, reject);
+ });
+
+ dbInfo.serializer = localforageSerializer;
+ return dbInfoPromise;
+}
+
+function tryExecuteSql(t, dbInfo, sqlStatement, args, callback, errorCallback) {
+ t.executeSql(sqlStatement, args, callback, function (t, error) {
+ if (error.code === error.SYNTAX_ERR) {
+ t.executeSql('SELECT name FROM sqlite_master ' + "WHERE type='table' AND name = ?", [dbInfo.storeName], function (t, results) {
+ if (!results.rows.length) {
+ // if the table is missing (was deleted),
+ // re-create the table and retry
+ createDbTable(t, dbInfo, function () {
+ t.executeSql(sqlStatement, args, callback, errorCallback);
+ }, errorCallback);
+ } else {
+ errorCallback(t, error);
+ }
+ }, errorCallback);
+ } else {
+ errorCallback(t, error);
+ }
+ }, errorCallback);
+}
+
+function getItem$1(key, callback) {
+ var self = this;
+
+ key = normalizeKey(key);
+
+ var promise = new Promise$1(function (resolve, reject) {
+ self.ready().then(function () {
+ var dbInfo = self._dbInfo;
+ dbInfo.db.transaction(function (t) {
+ tryExecuteSql(t, dbInfo, 'SELECT * FROM ' + dbInfo.storeName + ' WHERE key = ? LIMIT 1', [key], function (t, results) {
+ var result = results.rows.length ? results.rows.item(0).value : null;
+
+ // Check to see if this is serialized content we need to
+ // unpack.
+ if (result) {
+ result = dbInfo.serializer.deserialize(result);
+ }
+
+ resolve(result);
+ }, function (t, error) {
+ reject(error);
+ });
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+function iterate$1(iterator, callback) {
+ var self = this;
+
+ var promise = new Promise$1(function (resolve, reject) {
+ self.ready().then(function () {
+ var dbInfo = self._dbInfo;
+
+ dbInfo.db.transaction(function (t) {
+ tryExecuteSql(t, dbInfo, 'SELECT * FROM ' + dbInfo.storeName, [], function (t, results) {
+ var rows = results.rows;
+ var length = rows.length;
+
+ for (var i = 0; i < length; i++) {
+ var item = rows.item(i);
+ var result = item.value;
+
+ // Check to see if this is serialized content
+ // we need to unpack.
+ if (result) {
+ result = dbInfo.serializer.deserialize(result);
+ }
+
+ result = iterator(result, item.key, i + 1);
+
+ // void(0) prevents problems with redefinition
+ // of `undefined`.
+ if (result !== void 0) {
+ resolve(result);
+ return;
+ }
+ }
+
+ resolve();
+ }, function (t, error) {
+ reject(error);
+ });
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+function _setItem(key, value, callback, retriesLeft) {
+ var self = this;
+
+ key = normalizeKey(key);
+
+ var promise = new Promise$1(function (resolve, reject) {
+ self.ready().then(function () {
+ // The localStorage API doesn't return undefined values in an
+ // "expected" way, so undefined is always cast to null in all
+ // drivers. See: https://github.com/mozilla/localForage/pull/42
+ if (value === undefined) {
+ value = null;
+ }
+
+ // Save the original value to pass to the callback.
+ var originalValue = value;
+
+ var dbInfo = self._dbInfo;
+ dbInfo.serializer.serialize(value, function (value, error) {
+ if (error) {
+ reject(error);
+ } else {
+ dbInfo.db.transaction(function (t) {
+ tryExecuteSql(t, dbInfo, 'INSERT OR REPLACE INTO ' + dbInfo.storeName + ' ' + '(key, value) VALUES (?, ?)', [key, value], function () {
+ resolve(originalValue);
+ }, function (t, error) {
+ reject(error);
+ });
+ }, function (sqlError) {
+ // The transaction failed; check
+ // to see if it's a quota error.
+ if (sqlError.code === sqlError.QUOTA_ERR) {
+ // We reject the callback outright for now, but
+ // it's worth trying to re-run the transaction.
+ // Even if the user accepts the prompt to use
+ // more storage on Safari, this error handler
+ // will still be called.
+ //
+ // Try to re-run the transaction.
+ if (retriesLeft > 0) {
+ resolve(_setItem.apply(self, [key, originalValue, callback, retriesLeft - 1]));
+ return;
+ }
+ reject(sqlError);
+ }
+ });
+ }
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+function setItem$1(key, value, callback) {
+ return _setItem.apply(this, [key, value, callback, 1]);
+}
+
+function removeItem$1(key, callback) {
+ var self = this;
+
+ key = normalizeKey(key);
+
+ var promise = new Promise$1(function (resolve, reject) {
+ self.ready().then(function () {
+ var dbInfo = self._dbInfo;
+ dbInfo.db.transaction(function (t) {
+ tryExecuteSql(t, dbInfo, 'DELETE FROM ' + dbInfo.storeName + ' WHERE key = ?', [key], function () {
+ resolve();
+ }, function (t, error) {
+ reject(error);
+ });
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+// Deletes every item in the table.
+// TODO: Find out if this resets the AUTO_INCREMENT number.
+function clear$1(callback) {
+ var self = this;
+
+ var promise = new Promise$1(function (resolve, reject) {
+ self.ready().then(function () {
+ var dbInfo = self._dbInfo;
+ dbInfo.db.transaction(function (t) {
+ tryExecuteSql(t, dbInfo, 'DELETE FROM ' + dbInfo.storeName, [], function () {
+ resolve();
+ }, function (t, error) {
+ reject(error);
+ });
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+// Does a simple `COUNT(key)` to get the number of items stored in
+// localForage.
+function length$1(callback) {
+ var self = this;
+
+ var promise = new Promise$1(function (resolve, reject) {
+ self.ready().then(function () {
+ var dbInfo = self._dbInfo;
+ dbInfo.db.transaction(function (t) {
+ // Ahhh, SQL makes this one soooooo easy.
+ tryExecuteSql(t, dbInfo, 'SELECT COUNT(key) as c FROM ' + dbInfo.storeName, [], function (t, results) {
+ var result = results.rows.item(0).c;
+ resolve(result);
+ }, function (t, error) {
+ reject(error);
+ });
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+// Return the key located at key index X; essentially gets the key from a
+// `WHERE id = ?`. This is the most efficient way I can think to implement
+// this rarely-used (in my experience) part of the API, but it can seem
+// inconsistent, because we do `INSERT OR REPLACE INTO` on `setItem()`, so
+// the ID of each key will change every time it's updated. Perhaps a stored
+// procedure for the `setItem()` SQL would solve this problem?
+// TODO: Don't change ID on `setItem()`.
+function key$1(n, callback) {
+ var self = this;
+
+ var promise = new Promise$1(function (resolve, reject) {
+ self.ready().then(function () {
+ var dbInfo = self._dbInfo;
+ dbInfo.db.transaction(function (t) {
+ tryExecuteSql(t, dbInfo, 'SELECT key FROM ' + dbInfo.storeName + ' WHERE id = ? LIMIT 1', [n + 1], function (t, results) {
+ var result = results.rows.length ? results.rows.item(0).key : null;
+ resolve(result);
+ }, function (t, error) {
+ reject(error);
+ });
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+function keys$1(callback) {
+ var self = this;
+
+ var promise = new Promise$1(function (resolve, reject) {
+ self.ready().then(function () {
+ var dbInfo = self._dbInfo;
+ dbInfo.db.transaction(function (t) {
+ tryExecuteSql(t, dbInfo, 'SELECT key FROM ' + dbInfo.storeName, [], function (t, results) {
+ var keys = [];
+
+ for (var i = 0; i < results.rows.length; i++) {
+ keys.push(results.rows.item(i).key);
+ }
+
+ resolve(keys);
+ }, function (t, error) {
+ reject(error);
+ });
+ });
+ })["catch"](reject);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+// https://www.w3.org/TR/webdatabase/#databases
+// > There is no way to enumerate or delete the databases available for an origin from this API.
+function getAllStoreNames(db) {
+ return new Promise$1(function (resolve, reject) {
+ db.transaction(function (t) {
+ t.executeSql('SELECT name FROM sqlite_master ' + "WHERE type='table' AND name <> '__WebKitDatabaseInfoTable__'", [], function (t, results) {
+ var storeNames = [];
+
+ for (var i = 0; i < results.rows.length; i++) {
+ storeNames.push(results.rows.item(i).name);
+ }
+
+ resolve({
+ db: db,
+ storeNames: storeNames
+ });
+ }, function (t, error) {
+ reject(error);
+ });
+ }, function (sqlError) {
+ reject(sqlError);
+ });
+ });
+}
+
+function dropInstance$1(options, callback) {
+ callback = getCallback.apply(this, arguments);
+
+ var currentConfig = this.config();
+ options = typeof options !== 'function' && options || {};
+ if (!options.name) {
+ options.name = options.name || currentConfig.name;
+ options.storeName = options.storeName || currentConfig.storeName;
+ }
+
+ var self = this;
+ var promise;
+ if (!options.name) {
+ promise = Promise$1.reject('Invalid arguments');
+ } else {
+ promise = new Promise$1(function (resolve) {
+ var db;
+ if (options.name === currentConfig.name) {
+ // use the db reference of the current instance
+ db = self._dbInfo.db;
+ } else {
+ db = openDatabase(options.name, '', '', 0);
+ }
+
+ if (!options.storeName) {
+ // drop all database tables
+ resolve(getAllStoreNames(db));
+ } else {
+ resolve({
+ db: db,
+ storeNames: [options.storeName]
+ });
+ }
+ }).then(function (operationInfo) {
+ return new Promise$1(function (resolve, reject) {
+ operationInfo.db.transaction(function (t) {
+ function dropTable(storeName) {
+ return new Promise$1(function (resolve, reject) {
+ t.executeSql('DROP TABLE IF EXISTS ' + storeName, [], function () {
+ resolve();
+ }, function (t, error) {
+ reject(error);
+ });
+ });
+ }
+
+ var operations = [];
+ for (var i = 0, len = operationInfo.storeNames.length; i < len; i++) {
+ operations.push(dropTable(operationInfo.storeNames[i]));
+ }
+
+ Promise$1.all(operations).then(function () {
+ resolve();
+ })["catch"](function (e) {
+ reject(e);
+ });
+ }, function (sqlError) {
+ reject(sqlError);
+ });
+ });
+ });
+ }
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+var webSQLStorage = {
+ _driver: 'webSQLStorage',
+ _initStorage: _initStorage$1,
+ _support: isWebSQLValid(),
+ iterate: iterate$1,
+ getItem: getItem$1,
+ setItem: setItem$1,
+ removeItem: removeItem$1,
+ clear: clear$1,
+ length: length$1,
+ key: key$1,
+ keys: keys$1,
+ dropInstance: dropInstance$1
+};
+
+function isLocalStorageValid() {
+ try {
+ return typeof localStorage !== 'undefined' && 'setItem' in localStorage &&
+ // in IE8 typeof localStorage.setItem === 'object'
+ !!localStorage.setItem;
+ } catch (e) {
+ return false;
+ }
+}
+
+function _getKeyPrefix(options, defaultConfig) {
+ var keyPrefix = options.name + '/';
+
+ if (options.storeName !== defaultConfig.storeName) {
+ keyPrefix += options.storeName + '/';
+ }
+ return keyPrefix;
+}
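+
+// Sketch of the resulting localStorage keys (illustrative): with the
+// default config the prefix is 'localforage/', so setItem('foo', ...)
+// writes 'localforage/foo'; a custom storeName 'mystore' yields keys
+// like 'localforage/mystore/foo'.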
+
+// Check if localStorage throws when saving an item
+function checkIfLocalStorageThrows() {
+ var localStorageTestKey = '_localforage_support_test';
+
+ try {
+ localStorage.setItem(localStorageTestKey, true);
+ localStorage.removeItem(localStorageTestKey);
+
+ return false;
+ } catch (e) {
+ return true;
+ }
+}
+
+// Check if localStorage is usable and allows saving an item.
+// This method checks if localStorage is usable in Safari Private Browsing
+// mode, or in any other case where the available quota for localStorage
+// is 0 and there aren't any saved items yet.
+function _isLocalStorageUsable() {
+ return !checkIfLocalStorageThrows() || localStorage.length > 0;
+}
+
+// Config the localStorage backend, using options set in the config.
+function _initStorage$2(options) {
+ var self = this;
+ var dbInfo = {};
+ if (options) {
+ for (var i in options) {
+ dbInfo[i] = options[i];
+ }
+ }
+
+ dbInfo.keyPrefix = _getKeyPrefix(options, self._defaultConfig);
+
+ if (!_isLocalStorageUsable()) {
+ return Promise$1.reject();
+ }
+
+ self._dbInfo = dbInfo;
+ dbInfo.serializer = localforageSerializer;
+
+ return Promise$1.resolve();
+}
+
+// Remove all keys from the datastore, effectively destroying all data in
+// the app's key/value store!
+function clear$2(callback) {
+ var self = this;
+ var promise = self.ready().then(function () {
+ var keyPrefix = self._dbInfo.keyPrefix;
+
+ for (var i = localStorage.length - 1; i >= 0; i--) {
+ var key = localStorage.key(i);
+
+ if (key.indexOf(keyPrefix) === 0) {
+ localStorage.removeItem(key);
+ }
+ }
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+// Retrieve an item from the store. Unlike the original async_storage
+// library in Gaia, we don't modify return values at all. If a key's value
+// is `undefined`, we pass that value to the callback function.
+function getItem$2(key, callback) {
+ var self = this;
+
+ key = normalizeKey(key);
+
+ var promise = self.ready().then(function () {
+ var dbInfo = self._dbInfo;
+ var result = localStorage.getItem(dbInfo.keyPrefix + key);
+
+ // If a result was found, parse it from the serialized
+ // string into a JS object. If result isn't truthy, the key
+ // is likely undefined and we'll pass it straight to the
+ // callback.
+ if (result) {
+ result = dbInfo.serializer.deserialize(result);
+ }
+
+ return result;
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+// Iterate over all items in the store.
+function iterate$2(iterator, callback) {
+ var self = this;
+
+ var promise = self.ready().then(function () {
+ var dbInfo = self._dbInfo;
+ var keyPrefix = dbInfo.keyPrefix;
+ var keyPrefixLength = keyPrefix.length;
+ var length = localStorage.length;
+
+ // We use a dedicated iterator instead of the `i` variable below
+ // so other keys we fetch in localStorage aren't counted in
+ // the `iterationNumber` argument passed to the `iterate()`
+ // callback.
+ //
+ // See: github.com/mozilla/localForage/pull/435#discussion_r38061530
+ var iterationNumber = 1;
+
+ for (var i = 0; i < length; i++) {
+ var key = localStorage.key(i);
+ if (key.indexOf(keyPrefix) !== 0) {
+ continue;
+ }
+ var value = localStorage.getItem(key);
+
+ // If a result was found, parse it from the serialized
+ // string into a JS object. If result isn't truthy, the
+ // key is likely undefined and we'll pass it straight
+ // to the iterator.
+ if (value) {
+ value = dbInfo.serializer.deserialize(value);
+ }
+
+ value = iterator(value, key.substring(keyPrefixLength), iterationNumber++);
+
+ if (value !== void 0) {
+ return value;
+ }
+ }
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+// Same as localStorage's key() method, except takes a callback.
+function key$2(n, callback) {
+ var self = this;
+ var promise = self.ready().then(function () {
+ var dbInfo = self._dbInfo;
+ var result;
+ try {
+ result = localStorage.key(n);
+ } catch (error) {
+ result = null;
+ }
+
+ // Remove the prefix from the key, if a key is found.
+ if (result) {
+ result = result.substring(dbInfo.keyPrefix.length);
+ }
+
+ return result;
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+function keys$2(callback) {
+ var self = this;
+ var promise = self.ready().then(function () {
+ var dbInfo = self._dbInfo;
+ var length = localStorage.length;
+ var keys = [];
+
+ for (var i = 0; i < length; i++) {
+ var itemKey = localStorage.key(i);
+ if (itemKey.indexOf(dbInfo.keyPrefix) === 0) {
+ keys.push(itemKey.substring(dbInfo.keyPrefix.length));
+ }
+ }
+
+ return keys;
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+// Supply the number of keys in the datastore to the callback function.
+function length$2(callback) {
+ var self = this;
+ var promise = self.keys().then(function (keys) {
+ return keys.length;
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+// Remove an item from the store, nice and simple.
+function removeItem$2(key, callback) {
+ var self = this;
+
+ key = normalizeKey(key);
+
+ var promise = self.ready().then(function () {
+ var dbInfo = self._dbInfo;
+ localStorage.removeItem(dbInfo.keyPrefix + key);
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+// Set a key's value and run an optional callback once the value is set.
+// Unlike Gaia's implementation, the callback function is passed the value,
+// in case you want to operate on that value only after you're sure it
+// saved, or something like that.
+function setItem$2(key, value, callback) {
+ var self = this;
+
+ key = normalizeKey(key);
+
+ var promise = self.ready().then(function () {
+ // Convert undefined values to null.
+ // https://github.com/mozilla/localForage/pull/42
+ if (value === undefined) {
+ value = null;
+ }
+
+ // Save the original value to pass to the callback.
+ var originalValue = value;
+
+ return new Promise$1(function (resolve, reject) {
+ var dbInfo = self._dbInfo;
+ dbInfo.serializer.serialize(value, function (value, error) {
+ if (error) {
+ reject(error);
+ } else {
+ try {
+ localStorage.setItem(dbInfo.keyPrefix + key, value);
+ resolve(originalValue);
+ } catch (e) {
+ // localStorage capacity exceeded (e.g. 'QuotaExceededError' or
+ // Firefox's 'NS_ERROR_DOM_QUOTA_REACHED'), or some other failure;
+ // either way, reject once.
+ // TODO: Make this a specific error/event.
+ reject(e);
+ }
+ }
+ });
+ });
+ });
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+function dropInstance$2(options, callback) {
+ callback = getCallback.apply(this, arguments);
+
+ options = typeof options !== 'function' && options || {};
+ if (!options.name) {
+ var currentConfig = this.config();
+ options.name = options.name || currentConfig.name;
+ options.storeName = options.storeName || currentConfig.storeName;
+ }
+
+ var self = this;
+ var promise;
+ if (!options.name) {
+ promise = Promise$1.reject('Invalid arguments');
+ } else {
+ promise = new Promise$1(function (resolve) {
+ if (!options.storeName) {
+ resolve(options.name + '/');
+ } else {
+ resolve(_getKeyPrefix(options, self._defaultConfig));
+ }
+ }).then(function (keyPrefix) {
+ for (var i = localStorage.length - 1; i >= 0; i--) {
+ var key = localStorage.key(i);
+
+ if (key.indexOf(keyPrefix) === 0) {
+ localStorage.removeItem(key);
+ }
+ }
+ });
+ }
+
+ executeCallback(promise, callback);
+ return promise;
+}
+
+var localStorageWrapper = {
+ _driver: 'localStorageWrapper',
+ _initStorage: _initStorage$2,
+ _support: isLocalStorageValid(),
+ iterate: iterate$2,
+ getItem: getItem$2,
+ setItem: setItem$2,
+ removeItem: removeItem$2,
+ clear: clear$2,
+ length: length$2,
+ key: key$2,
+ keys: keys$2,
+ dropInstance: dropInstance$2
+};
+
+var sameValue = function sameValue(x, y) {
+ return x === y || typeof x === 'number' && typeof y === 'number' && isNaN(x) && isNaN(y);
+};
+
+var includes = function includes(array, searchElement) {
+ var len = array.length;
+ var i = 0;
+ while (i < len) {
+ if (sameValue(array[i], searchElement)) {
+ return true;
+ }
+ i++;
+ }
+
+ return false;
+};
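+
+// Unlike Array.prototype.indexOf, this helper treats NaN as equal to
+// itself. Illustrative sketch:
+//
+//   includes([1, NaN, 3], NaN);      // true
+//   [1, NaN, 3].indexOf(NaN) !== -1; // false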
+
+var isArray = Array.isArray || function (arg) {
+ return Object.prototype.toString.call(arg) === '[object Array]';
+};
+
+// Drivers are stored here when `defineDriver()` is called.
+// They are shared across all instances of localForage.
+var DefinedDrivers = {};
+
+var DriverSupport = {};
+
+var DefaultDrivers = {
+ INDEXEDDB: asyncStorage,
+ WEBSQL: webSQLStorage,
+ LOCALSTORAGE: localStorageWrapper
+};
+
+var DefaultDriverOrder = [DefaultDrivers.INDEXEDDB._driver, DefaultDrivers.WEBSQL._driver, DefaultDrivers.LOCALSTORAGE._driver];
+
+var OptionalDriverMethods = ['dropInstance'];
+
+var LibraryMethods = ['clear', 'getItem', 'iterate', 'key', 'keys', 'length', 'removeItem', 'setItem'].concat(OptionalDriverMethods);
+
+var DefaultConfig = {
+ description: '',
+ driver: DefaultDriverOrder.slice(),
+ name: 'localforage',
+ // Default DB size is _JUST UNDER_ 5MB, as it's the highest size
+ // we can use without a prompt.
+ size: 4980736,
+ storeName: 'keyvaluepairs',
+ version: 1.0
+};
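+
+// Minimal configuration sketch (illustrative; assumes the bundle's default
+// exported instance is named `localforage`, and must run before the first
+// API call):
+//
+//   localforage.config({
+//     name: 'myApp',         // database name
+//     storeName: 'settings', // non-word characters are replaced with '_'
+//     version: 1.0           // must be a number
+//   });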
+
+function callWhenReady(localForageInstance, libraryMethod) {
+ localForageInstance[libraryMethod] = function () {
+ var _args = arguments;
+ return localForageInstance.ready().then(function () {
+ return localForageInstance[libraryMethod].apply(localForageInstance, _args);
+ });
+ };
+}
+
+function extend() {
+ for (var i = 1; i < arguments.length; i++) {
+ var arg = arguments[i];
+
+ if (arg) {
+ for (var _key in arg) {
+ if (arg.hasOwnProperty(_key)) {
+ if (isArray(arg[_key])) {
+ arguments[0][_key] = arg[_key].slice();
+ } else {
+ arguments[0][_key] = arg[_key];
+ }
+ }
+ }
+ }
+ }
+
+ return arguments[0];
+}
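+
+// Sketch of the shallow merge above (illustrative): later sources win,
+// and array values are copied with slice() so the target never aliases
+// a source array.
+//
+//   var merged = extend({}, { driver: ['a'] }, { name: 'x' });
+//   // merged => { driver: ['a'], name: 'x' }; merged.driver is a fresh copy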
+
+var LocalForage = function () {
+ function LocalForage(options) {
+ _classCallCheck(this, LocalForage);
+
+ for (var driverTypeKey in DefaultDrivers) {
+ if (DefaultDrivers.hasOwnProperty(driverTypeKey)) {
+ var driver = DefaultDrivers[driverTypeKey];
+ var driverName = driver._driver;
+ this[driverTypeKey] = driverName;
+
+ if (!DefinedDrivers[driverName]) {
+ // we don't need to wait for the promise,
+ // since the default drivers can be defined
+ // in a blocking manner
+ this.defineDriver(driver);
+ }
+ }
+ }
+
+ this._defaultConfig = extend({}, DefaultConfig);
+ this._config = extend({}, this._defaultConfig, options);
+ this._driverSet = null;
+ this._initDriver = null;
+ this._ready = false;
+ this._dbInfo = null;
+
+ this._wrapLibraryMethodsWithReady();
+ this.setDriver(this._config.driver)["catch"](function () {});
+ }
+
+ // Set any config values for localForage; can be called anytime before
+ // the first API call (e.g. `getItem`, `setItem`).
+ // We loop through options so we don't overwrite existing config
+ // values.
+
+
+ LocalForage.prototype.config = function config(options) {
+ // If the options argument is an object, we use it to set values.
+ // Otherwise, we return either a specified config value or all
+ // config values.
+ if ((typeof options === 'undefined' ? 'undefined' : _typeof(options)) === 'object') {
+ // If localforage is ready and fully initialized, we can't set
+ // any new configuration values. Instead, we return an error.
+ if (this._ready) {
+ return new Error("Can't call config() after localforage " + 'has been used.');
+ }
+
+ for (var i in options) {
+ if (i === 'storeName') {
+ options[i] = options[i].replace(/\W/g, '_');
+ }
+
+ if (i === 'version' && typeof options[i] !== 'number') {
+ return new Error('Database version must be a number.');
+ }
+
+ this._config[i] = options[i];
+ }
+
+ // after all config options are set and
+ // the driver option is used, try setting it
+ if ('driver' in options && options.driver) {
+ return this.setDriver(this._config.driver);
+ }
+
+ return true;
+ } else if (typeof options === 'string') {
+ return this._config[options];
+ } else {
+ return this._config;
+ }
+ };
+
+ // Used to define a custom driver, shared across all instances of
+ // localForage.
+
+
+ LocalForage.prototype.defineDriver = function defineDriver(driverObject, callback, errorCallback) {
+ var promise = new Promise$1(function (resolve, reject) {
+ try {
+ var driverName = driverObject._driver;
+ var complianceError = new Error('Custom driver not compliant; see ' + 'https://mozilla.github.io/localForage/#definedriver');
+
+ // A driver name should be defined and not overlap with the
+ // library-defined, default drivers.
+ if (!driverObject._driver) {
+ reject(complianceError);
+ return;
+ }
+
+ var driverMethods = LibraryMethods.concat('_initStorage');
+ for (var i = 0, len = driverMethods.length; i < len; i++) {
+ var driverMethodName = driverMethods[i];
+
+ // when the property is there,
+ // it should be a method even when optional
+ var isRequired = !includes(OptionalDriverMethods, driverMethodName);
+ if ((isRequired || driverObject[driverMethodName]) && typeof driverObject[driverMethodName] !== 'function') {
+ reject(complianceError);
+ return;
+ }
+ }
+
+ var configureMissingMethods = function configureMissingMethods() {
+ var methodNotImplementedFactory = function methodNotImplementedFactory(methodName) {
+ return function () {
+ var error = new Error('Method ' + methodName + ' is not implemented by the current driver');
+ var promise = Promise$1.reject(error);
+ executeCallback(promise, arguments[arguments.length - 1]);
+ return promise;
+ };
+ };
+
+ for (var _i = 0, _len = OptionalDriverMethods.length; _i < _len; _i++) {
+ var optionalDriverMethod = OptionalDriverMethods[_i];
+ if (!driverObject[optionalDriverMethod]) {
+ driverObject[optionalDriverMethod] = methodNotImplementedFactory(optionalDriverMethod);
+ }
+ }
+ };
+
+ configureMissingMethods();
+
+ var setDriverSupport = function setDriverSupport(support) {
+ if (DefinedDrivers[driverName]) {
+ console.info('Redefining LocalForage driver: ' + driverName);
+ }
+ DefinedDrivers[driverName] = driverObject;
+ DriverSupport[driverName] = support;
+ // don't use a then, so that we can define
+ // drivers that have simple _support methods
+ // in a blocking manner
+ resolve();
+ };
+
+ if ('_support' in driverObject) {
+ if (driverObject._support && typeof driverObject._support === 'function') {
+ driverObject._support().then(setDriverSupport, reject);
+ } else {
+ setDriverSupport(!!driverObject._support);
+ }
+ } else {
+ setDriverSupport(true);
+ }
+ } catch (e) {
+ reject(e);
+ }
+ });
+
+ executeTwoCallbacks(promise, callback, errorCallback);
+ return promise;
+ };
+
+ LocalForage.prototype.driver = function driver() {
+ return this._driver || null;
+ };
+
+ LocalForage.prototype.getDriver = function getDriver(driverName, callback, errorCallback) {
+ var getDriverPromise = DefinedDrivers[driverName] ? Promise$1.resolve(DefinedDrivers[driverName]) : Promise$1.reject(new Error('Driver not found.'));
+
+ executeTwoCallbacks(getDriverPromise, callback, errorCallback);
+ return getDriverPromise;
+ };
+
+ LocalForage.prototype.getSerializer = function getSerializer(callback) {
+ var serializerPromise = Promise$1.resolve(localforageSerializer);
+ executeTwoCallbacks(serializerPromise, callback);
+ return serializerPromise;
+ };
+
+ LocalForage.prototype.ready = function ready(callback) {
+ var self = this;
+
+ var promise = self._driverSet.then(function () {
+ if (self._ready === null) {
+ self._ready = self._initDriver();
+ }
+
+ return self._ready;
+ });
+
+ executeTwoCallbacks(promise, callback, callback);
+ return promise;
+ };
+
+ LocalForage.prototype.setDriver = function setDriver(drivers, callback, errorCallback) {
+ var self = this;
+
+ if (!isArray(drivers)) {
+ drivers = [drivers];
+ }
+
+ var supportedDrivers = this._getSupportedDrivers(drivers);
+
+ function setDriverToConfig() {
+ self._config.driver = self.driver();
+ }
+
+ function extendSelfWithDriver(driver) {
+ self._extend(driver);
+ setDriverToConfig();
+
+ self._ready = self._initStorage(self._config);
+ return self._ready;
+ }
+
+ function initDriver(supportedDrivers) {
+ return function () {
+ var currentDriverIndex = 0;
+
+ function driverPromiseLoop() {
+ while (currentDriverIndex < supportedDrivers.length) {
+ var driverName = supportedDrivers[currentDriverIndex];
+ currentDriverIndex++;
+
+ self._dbInfo = null;
+ self._ready = null;
+
+ return self.getDriver(driverName).then(extendSelfWithDriver)["catch"](driverPromiseLoop);
+ }
+
+ setDriverToConfig();
+ var error = new Error('No available storage method found.');
+ self._driverSet = Promise$1.reject(error);
+ return self._driverSet;
+ }
+
+ return driverPromiseLoop();
+ };
+ }
+
+ // A driver initialization may already be in progress, so wait
+ // for it to finish to avoid a race condition when setting
+ // _dbInfo
+ var oldDriverSetDone = this._driverSet !== null ? this._driverSet["catch"](function () {
+ return Promise$1.resolve();
+ }) : Promise$1.resolve();
+
+ this._driverSet = oldDriverSetDone.then(function () {
+ var driverName = supportedDrivers[0];
+ self._dbInfo = null;
+ self._ready = null;
+
+ return self.getDriver(driverName).then(function (driver) {
+ self._driver = driver._driver;
+ setDriverToConfig();
+ self._wrapLibraryMethodsWithReady();
+ self._initDriver = initDriver(supportedDrivers);
+ });
+ })["catch"](function () {
+ setDriverToConfig();
+ var error = new Error('No available storage method found.');
+ self._driverSet = Promise$1.reject(error);
+ return self._driverSet;
+ });
+
+ executeTwoCallbacks(this._driverSet, callback, errorCallback);
+ return this._driverSet;
+ };
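+
+ // Usage sketch (illustrative): setDriver accepts one driver name or
+ // an ordered list of fallbacks; ready() resolves once the chosen
+ // driver has finished initializing.
+ //
+ //   localforage.setDriver([localforage.INDEXEDDB, localforage.LOCALSTORAGE]);
+ //   localforage.ready().then(function () {
+ //     console.log('Using driver:', localforage.driver());
+ //   });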
+
+ LocalForage.prototype.supports = function supports(driverName) {
+ return !!DriverSupport[driverName];
+ };
+
+ LocalForage.prototype._extend = function _extend(libraryMethodsAndProperties) {
+ extend(this, libraryMethodsAndProperties);
+ };
+
+ LocalForage.prototype._getSupportedDrivers = function _getSupportedDrivers(drivers) {
+ var supportedDrivers = [];
+ for (var i = 0, len = drivers.length; i < len; i++) {
+ var driverName = drivers[i];
+ if (this.supports(driverName)) {
+ supportedDrivers.push(driverName);
+ }
+ }
+ return supportedDrivers;
+ };
+
+ LocalForage.prototype._wrapLibraryMethodsWithReady = function _wrapLibraryMethodsWithReady() {
+ // Add a stub for each driver API method that delays the call to the
+ // corresponding driver method until localForage is ready. These stubs
+ // will be replaced by the driver methods as soon as the driver is
+ // loaded, so there is no performance impact.
+ for (var i = 0, len = LibraryMethods.length; i < len; i++) {
+ callWhenReady(this, LibraryMethods[i]);
+ }
+ };
+
+ LocalForage.prototype.createInstance = function createInstance(options) {
+ return new LocalForage(options);
+ };
+
+ return LocalForage;
+}();
+
+// The actual localForage object that we expose as a module or via a
+// global. It's extended by pulling in one of our other libraries.
+
+
+var localforage_js = new LocalForage();
+
+module.exports = localforage_js;
+
+},{"3":3}]},{},[4])(4)
+});
+
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(27)))
+
+/***/ }),
+/* 33 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* WEBPACK VAR INJECTION */(function(global, process) {/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return getGlobalNamespace; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return getGlobal; });
+/**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+// Note that the identifier globalNameSpace is scoped to this module, but will
+// always resolve to the same global object regardless of how the module is
+// resolved.
+// tslint:disable-next-line:no-any
+let globalNameSpace;
+// tslint:disable-next-line:no-any
+function getGlobalNamespace() {
+ if (globalNameSpace == null) {
+ // tslint:disable-next-line:no-any
+ let ns;
+ if (typeof (window) !== 'undefined') {
+ ns = window;
+ }
+ else if (typeof (global) !== 'undefined') {
+ ns = global;
+ }
+ else if (typeof (process) !== 'undefined') {
+ ns = process;
+ }
+ else if (typeof (self) !== 'undefined') {
+ ns = self;
+ }
+ else {
+ throw new Error('Could not find a global object');
+ }
+ globalNameSpace = ns;
+ }
+ return globalNameSpace;
+}
+// tslint:disable-next-line:no-any
+function getGlobalMap() {
+ const ns = getGlobalNamespace();
+ if (ns._tfGlobals == null) {
+ ns._tfGlobals = new Map();
+ }
+ return ns._tfGlobals;
+}
+/**
+ * Returns a globally accessible 'singleton' object.
+ *
+ * @param key the name of the object
+ * @param init a function that initializes this object
+ * the first time it is fetched.
+ */
+function getGlobal(key, init) {
+ const globalMap = getGlobalMap();
+ if (globalMap.has(key)) {
+ return globalMap.get(key);
+ }
+ else {
+ const singleton = init();
+ globalMap.set(key, singleton);
+ return globalMap.get(key);
+ }
+}
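+// Usage sketch (illustrative): fetch-or-create a shared singleton.
+// The key 'myRegistry' is hypothetical.
+//
+//   const registry = getGlobal('myRegistry', () => new Map());
+//   // A later call with the same key returns the same instance:
+//   getGlobal('myRegistry', () => new Map()) === registry; // true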
+//# sourceMappingURL=global_util.js.map
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(27), __webpack_require__(35)))
+
+/***/ }),
+/* 34 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return DTYPE_VALUE_SIZE_MAP; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+/* Type definitions for exporting and importing of models. */
+/**
+ * A map from Tensor dtype to number of bytes per element of the Tensor.
+ */
+const DTYPE_VALUE_SIZE_MAP = {
+ 'float32': 4,
+ 'float16': 2,
+ 'int32': 4,
+ 'uint16': 2,
+ 'uint8': 1,
+ 'bool': 1,
+ 'complex64': 8
+};
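+// Usage sketch (illustrative): compute the byte size of a tensor
+// buffer from its dtype and element count.
+//
+//   const numBytes = DTYPE_VALUE_SIZE_MAP['float32'] * 1024; // 4096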
+//# sourceMappingURL=types.js.map
+
+/***/ }),
+/* 35 */
+/***/ (function(module, exports) {
+
+// shim for using process in browser
+var process = module.exports = {};
+
+// Cache setTimeout/clearTimeout from whatever global is present so that
+// test runners that stub them don't break things. The lookup is wrapped
+// in a try/catch in case it runs inside strict-mode code that doesn't
+// define any globals, and lives in a function because try/catch blocks
+// deoptimize in certain engines.
+
+var cachedSetTimeout;
+var cachedClearTimeout;
+
+function defaultSetTimout() {
+ throw new Error('setTimeout has not been defined');
+}
+function defaultClearTimeout () {
+ throw new Error('clearTimeout has not been defined');
+}
+(function () {
+ try {
+ if (typeof setTimeout === 'function') {
+ cachedSetTimeout = setTimeout;
+ } else {
+ cachedSetTimeout = defaultSetTimout;
+ }
+ } catch (e) {
+ cachedSetTimeout = defaultSetTimout;
+ }
+ try {
+ if (typeof clearTimeout === 'function') {
+ cachedClearTimeout = clearTimeout;
+ } else {
+ cachedClearTimeout = defaultClearTimeout;
+ }
+ } catch (e) {
+ cachedClearTimeout = defaultClearTimeout;
+ }
+} ())
+function runTimeout(fun) {
+ if (cachedSetTimeout === setTimeout) {
+ // normal environments in sane situations
+ return setTimeout(fun, 0);
+ }
+ // if setTimeout wasn't available but was later defined
+ if ((cachedSetTimeout === defaultSetTimout || !cachedSetTimeout) && setTimeout) {
+ cachedSetTimeout = setTimeout;
+ return setTimeout(fun, 0);
+ }
+ try {
+ // when somebody has screwed with setTimeout but there's no IE madness
+ return cachedSetTimeout(fun, 0);
+ } catch(e){
+ try {
+ // When we are in IE and the script has been evaled, IE doesn't trust the global object when called normally
+ return cachedSetTimeout.call(null, fun, 0);
+ } catch(e){
+ // same as above, but for a version of IE that requires the global object for 'this'; hopefully our context is correct, otherwise it will throw a global error
+ return cachedSetTimeout.call(this, fun, 0);
+ }
+ }
+
+
+}
+function runClearTimeout(marker) {
+ if (cachedClearTimeout === clearTimeout) {
+ // normal environments in sane situations
+ return clearTimeout(marker);
+ }
+ // if clearTimeout wasn't available but was later defined
+ if ((cachedClearTimeout === defaultClearTimeout || !cachedClearTimeout) && clearTimeout) {
+ cachedClearTimeout = clearTimeout;
+ return clearTimeout(marker);
+ }
+ try {
+ // when somebody has screwed with clearTimeout but there's no IE madness
+ return cachedClearTimeout(marker);
+ } catch (e){
+ try {
+ // When we are in IE and the script has been evaled, IE doesn't trust the global object when called normally
+ return cachedClearTimeout.call(null, marker);
+ } catch (e){
+ // same as above, but for a version of IE that requires the global object for 'this'; hopefully our context is correct, otherwise it will throw a global error.
+ // Some versions of IE have different rules for clearTimeout vs setTimeout
+ return cachedClearTimeout.call(this, marker);
+ }
+ }
+
+
+
+}
+var queue = [];
+var draining = false;
+var currentQueue;
+var queueIndex = -1;
+
+function cleanUpNextTick() {
+ if (!draining || !currentQueue) {
+ return;
+ }
+ draining = false;
+ if (currentQueue.length) {
+ queue = currentQueue.concat(queue);
+ } else {
+ queueIndex = -1;
+ }
+ if (queue.length) {
+ drainQueue();
+ }
+}
+
+function drainQueue() {
+ if (draining) {
+ return;
+ }
+ var timeout = runTimeout(cleanUpNextTick);
+ draining = true;
+
+ var len = queue.length;
+ while(len) {
+ currentQueue = queue;
+ queue = [];
+ while (++queueIndex < len) {
+ if (currentQueue) {
+ currentQueue[queueIndex].run();
+ }
+ }
+ queueIndex = -1;
+ len = queue.length;
+ }
+ currentQueue = null;
+ draining = false;
+ runClearTimeout(timeout);
+}
+
+process.nextTick = function (fun) {
+ var args = new Array(arguments.length - 1);
+ if (arguments.length > 1) {
+ for (var i = 1; i < arguments.length; i++) {
+ args[i - 1] = arguments[i];
+ }
+ }
+ queue.push(new Item(fun, args));
+ if (queue.length === 1 && !draining) {
+ runTimeout(drainQueue);
+ }
+};
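+
+// Usage sketch (illustrative): queued callbacks run together on the
+// next timer turn, in FIFO order, with extra arguments forwarded.
+//
+//   process.nextTick(function (tag) { console.log('tick', tag); }, 'a');
+//   process.nextTick(function (tag) { console.log('tick', tag); }, 'b');
+//   // after the current turn: logs 'tick a' then 'tick b'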
+
+// v8 likes predictable objects
+function Item(fun, array) {
+ this.fun = fun;
+ this.array = array;
+}
+Item.prototype.run = function () {
+ this.fun.apply(null, this.array);
+};
+process.title = 'browser';
+process.browser = true;
+process.env = {};
+process.argv = [];
+process.version = ''; // empty string to avoid regexp issues
+process.versions = {};
+
+function noop() {}
+
+process.on = noop;
+process.addListener = noop;
+process.once = noop;
+process.off = noop;
+process.removeListener = noop;
+process.removeAllListeners = noop;
+process.emit = noop;
+process.prependListener = noop;
+process.prependOnceListener = noop;
+
+process.listeners = function (name) { return [] }
+
+process.binding = function (name) {
+ throw new Error('process.binding is not supported');
+};
+
+process.cwd = function () { return '/' };
+process.chdir = function (dir) {
+ throw new Error('process.chdir is not supported');
+};
+process.umask = function() { return 0; };
+
+
+/***/ }),
+/* 36 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "isMobile", function() { return isMobile; });
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "isBrowser", function() { return isBrowser; });
+/**
+ * @license
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+// tslint:disable-next-line:no-any
+function _isNavigatorDefined() {
+ return typeof navigator !== 'undefined' && navigator != null;
+}
+function isMobile() {
+ if (_isNavigatorDefined()) {
+ // tslint:disable-next-line:no-any
+ const a = navigator.userAgent || navigator.vendor || window.opera;
+ // tslint:disable-next-line:max-line-length
+ return /(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i
+ .test(a) ||
+ // tslint:disable-next-line:max-line-length
+ /1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i
+ .test(a.substr(0, 4));
+ }
+ return false;
+}
+function isBrowser() {
+ return (typeof window !== 'undefined' && window.document != null) ||
+ //@ts-ignore
+ (typeof WorkerGlobalScope !== 'undefined');
+}
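+// Usage sketch (illustrative): choose a code path by environment.
+//
+//   if (isBrowser() && !isMobile()) {
+//     // e.g. enable a desktop-only rendering configuration
+//   }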
+//# sourceMappingURL=device_util.js.map
+
+/***/ }),
+/* 37 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return maxImpl; });
+/* harmony import */ var _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(0);
+/**
+ * @license
+ * Copyright 2020 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+function maxImpl(aVals, reduceSize, outShape, dtype) {
+ const vals = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].getTypedArrayFromDType(dtype, _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].sizeFromShape(outShape));
+ for (let i = 0; i < vals.length; ++i) {
+ const offset = i * reduceSize;
+ let max = aVals[offset];
+ for (let j = 0; j < reduceSize; ++j) {
+ const value = aVals[offset + j];
+ if (value > max) {
+ max = value;
+ }
+ }
+ vals[i] = max;
+ }
+ return vals;
+}
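+// Worked example (illustrative): with aVals = [1, 5, 2, 4],
+// reduceSize = 2 and outShape = [2], each output element is the max
+// over one contiguous run of two input values:
+//
+//   maxImpl(new Float32Array([1, 5, 2, 4]), 2, [2], 'float32');
+//   // => Float32Array [5, 4]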
+//# sourceMappingURL=Max_impl.js.map
+
+/***/ }),
+/* 38 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+// ESM COMPAT FLAG
+__webpack_require__.r(__webpack_exports__);
+
+// EXPORTS
+__webpack_require__.d(__webpack_exports__, "GraphModel", function() { return /* reexport */ graph_model_GraphModel; });
+__webpack_require__.d(__webpack_exports__, "loadGraphModel", function() { return /* reexport */ loadGraphModel; });
+__webpack_require__.d(__webpack_exports__, "deregisterOp", function() { return /* reexport */ register["a" /* deregisterOp */]; });
+__webpack_require__.d(__webpack_exports__, "registerOp", function() { return /* reexport */ register["c" /* registerOp */]; });
+__webpack_require__.d(__webpack_exports__, "version_converter", function() { return /* reexport */ version; });
+
+// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/index.js + 269 modules
+var dist = __webpack_require__(0);
+
+// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/operation_mapper.js
+var operation_mapper = __webpack_require__(15);
+
+// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/utils.js
+var utils = __webpack_require__(2);
+
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/custom_op/node_value_impl.js
+/**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+/**
+ * Helper class for looking up inputs and params of nodes in the model graph.
+ */
+class node_value_impl_NodeValueImpl {
+ constructor(node, tensorMap, context) {
+ this.node = node;
+ this.tensorMap = tensorMap;
+ this.context = context;
+ this.inputs = [];
+ this.attrs = {};
+ this.inputs = node.inputNames.map(name => this.getInput(name));
+ if (node.rawAttrs != null) {
+ this.attrs = Object.keys(node.rawAttrs)
+ .reduce((attrs, key) => {
+ attrs[key] = this.getAttr(key);
+ return attrs;
+ }, {});
+ }
+ }
+ /**
+ * Return the value of the input param.
+ * @param name String: name of the input param.
+ */
+ getInput(name) {
+ return Object(utils["c" /* getTensor */])(name, this.tensorMap, this.context);
+ }
+ /**
+ * Return the value of the attribute, or defaultValue when the
+ * attribute is not set.
+ * @param name String: name of the attribute.
+ */
+ getAttr(name, defaultValue) {
+ const value = this.node.rawAttrs[name];
+ if (value.tensor != null) {
+ return Object(utils["c" /* getTensor */])(name, this.tensorMap, this.context);
+ }
+ if (value.i != null || value.f != null) {
+ return Object(operation_mapper["f" /* getNumberParam */])(this.node.rawAttrs, name, defaultValue);
+ }
+ if (value.s != null) {
+ return Object(operation_mapper["i" /* getStringParam */])(this.node.rawAttrs, name, defaultValue);
+ }
+ if (value.b != null) {
+ return Object(operation_mapper["c" /* getBoolParam */])(this.node.rawAttrs, name, defaultValue);
+ }
+ if (value.shape != null) {
+ return Object(operation_mapper["k" /* getTensorShapeParam */])(this.node.rawAttrs, name, defaultValue);
+ }
+ if (value.type != null) {
+ return Object(operation_mapper["e" /* getDtypeParam */])(this.node.rawAttrs, name, defaultValue);
+ }
+ if (value.list != null) {
+ if (value.list.i != null || value.list.f != null) {
+ return Object(operation_mapper["g" /* getNumericArrayParam */])(this.node.rawAttrs, name, defaultValue);
+ }
+ if (value.list.s != null) {
+ return Object(operation_mapper["h" /* getStringArrayParam */])(this.node.rawAttrs, name, defaultValue);
+ }
+ if (value.list.shape != null) {
+ return Object(operation_mapper["j" /* getTensorShapeArrayParam */])(this.node.rawAttrs, name, defaultValue);
+ }
+ if (value.list.b != null) {
+ return Object(operation_mapper["b" /* getBoolArrayParam */])(this.node.rawAttrs, name, defaultValue);
+ }
+ if (value.list.type != null) {
+ return Object(operation_mapper["d" /* getDtypeArrayParam */])(this.node.rawAttrs, name, defaultValue);
+ }
+ }
+ return defaultValue;
+ }
+}
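+// Usage sketch (illustrative): a custom op registered through
+// registerOp receives a NodeValueImpl, so its inputs and attrs
+// resolve through the accessors above. The op name 'MyCustomOp'
+// and the attr 'alpha' are hypothetical.
+//
+//   registerOp('MyCustomOp', (node) => {
+//     const x = node.inputs[0];          // first input tensor
+//     const alpha = node.attrs['alpha']; // parsed raw attribute
+//     return [x.mul(alpha)];
+//   });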
+//# sourceMappingURL=node_value_impl.js.map
+// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/custom_op/register.js
+var register = __webpack_require__(24);
+
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/arithmetic_executor.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const executeOp = (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'BiasAdd':
+ case 'AddV2':
+ case 'Add': {
+ return [dist["add"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'AddN': {
+ return [dist["addN"](Object(utils["b" /* getParamValue */])('tensors', node, tensorMap, context))];
+ }
+ case 'FloorMod':
+ case 'Mod':
+ return [dist["mod"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ case 'Mul':
+ return [dist["mul"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ case 'RealDiv':
+ case 'Div': {
+ return [dist["div"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'DivNoNan': {
+ return [dist["divNoNan"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'FloorDiv': {
+ return [dist["floorDiv"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'Sub': {
+ return [dist["sub"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'Minimum': {
+ return [dist["minimum"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'Maximum': {
+ return [dist["maximum"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'Pow': {
+ return [dist["pow"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'SquaredDifference': {
+ return [dist["squaredDifference"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const CATEGORY = 'arithmetic';
+//# sourceMappingURL=arithmetic_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/basic_math_executor.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const basic_math_executor_executeOp = (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'Abs':
+ case 'ComplexAbs':
+ return [dist["abs"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Acos':
+ return [dist["acos"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Acosh':
+ return [dist["acosh"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Asin':
+ return [dist["asin"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Asinh':
+ return [dist["asinh"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Atan':
+ return [dist["atan"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Atan2':
+ return [dist["atan2"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('y', node, tensorMap, context))];
+ case 'Atanh':
+ return [dist["atanh"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Ceil':
+ return [dist["ceil"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Complex':
+ return [dist["complex"](Object(utils["b" /* getParamValue */])('real', node, tensorMap, context), Object(utils["b" /* getParamValue */])('imag', node, tensorMap, context))];
+ case 'Cos':
+ return [dist["cos"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Cosh':
+ return [dist["cosh"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Elu':
+ return [dist["elu"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Erf':
+ return [dist["erf"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Exp':
+ return [dist["exp"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Expm1': {
+ return [dist["expm1"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'Floor':
+ return [dist["floor"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Log':
+ return [dist["log"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Log1p': {
+ return [dist["log1p"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'Imag':
+ return [dist["imag"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Neg':
+ return [dist["neg"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Reciprocal': {
+ return [dist["reciprocal"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'Real':
+ return [dist["real"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Relu':
+ return [dist["relu"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Round': {
+ return [dist["round"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'Selu':
+ return [dist["selu"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Sigmoid':
+ return [dist["sigmoid"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Sin':
+ return [dist["sin"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Sign': {
+ return [dist["sign"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'Sinh': {
+ return [dist["sinh"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'Softplus': {
+ return [dist["softplus"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'Sqrt': {
+ return [dist["sqrt"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'Square': {
+ return [dist["square"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'Tanh': {
+ return [dist["tanh"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'Tan':
+ return [dist["tan"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ case 'Relu6':
+ case 'ClipByValue':
+ return [dist["clipByValue"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('clipValueMin', node, tensorMap, context), Object(utils["b" /* getParamValue */])('clipValueMax', node, tensorMap, context))];
+ case 'Rsqrt':
+ return [dist["rsqrt"](Object(utils["c" /* getTensor */])(node.inputNames[0], tensorMap, context))];
+ case 'Prod':
+ return [dist["prod"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('axes', node, tensorMap, context))];
+ case 'LeakyRelu':
+ return [dist["leakyRelu"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('alpha', node, tensorMap, context))];
+ case 'Prelu':
+ return [dist["prelu"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('alpha', node, tensorMap, context))];
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const basic_math_executor_CATEGORY = 'basic_math';
+//# sourceMappingURL=basic_math_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/executor/tensor_utils.js
+/**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+/**
+ * This differs from util.assertShapesMatch in that a value of
+ * negative one (an undefined dimension size) in a shape matches
+ * anything.
+ */
+
+function assertShapesMatchAllowUndefinedSize(shapeA, shapeB, errorMessagePrefix = '') {
+ dist["util"].assert(shapesEqualAllowUndefinedSize(shapeA, shapeB), () => errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`);
+}
+function shapesEqualAllowUndefinedSize(n1, n2) {
+ if (n1.length !== n2.length) {
+ return false;
+ }
+ for (let i = 0; i < n1.length; i++) {
+ if (n1[i] !== -1 && n2[i] !== -1 && n1[i] !== n2[i]) {
+ return false;
+ }
+ }
+ return true;
+}
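+// Worked examples (illustrative): -1 acts as a wildcard dimension.
+//
+//   shapesEqualAllowUndefinedSize([-1, 3], [5, 3]); // true
+//   shapesEqualAllowUndefinedSize([2, 3], [5, 3]);  // false
+//   shapesEqualAllowUndefinedSize([2], [2, 1]);     // false (rank differs)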
+//# sourceMappingURL=tensor_utils.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/executor/tensor_array.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+/**
+ * The TensorArray object keeps an array of Tensors. It
+ * allows reading from the array and writing to the array.
+ */
+class tensor_array_TensorArray {
+ constructor(name, dtype, maxSize, elementShape, identicalElementShapes, dynamicSize, clearAfterRead) {
+ this.name = name;
+ this.dtype = dtype;
+ this.maxSize = maxSize;
+ this.elementShape = elementShape;
+ this.identicalElementShapes = identicalElementShapes;
+ this.dynamicSize = dynamicSize;
+ this.clearAfterRead = clearAfterRead;
+ this.tensors = [];
+ this.closed_ = false;
+ this.id = tensor_array_TensorArray.nextId++;
+ }
+ get closed() {
+ return this.closed_;
+ }
+ /**
+ * Close the current TensorArray.
+ */
+ clearAndClose() {
+ this.tensors.forEach(tensor => tensor.tensor.dispose());
+ this.tensors = [];
+ this.closed_ = true;
+ }
+ size() {
+ return this.tensors.length;
+ }
+ /**
+ * Read the value at location index in the TensorArray.
+ * @param index Number the index to read from.
+ */
+ read(index) {
+ if (this.closed_) {
+ throw new Error(`TensorArray ${this.name} has already been closed.`);
+ }
+ if (index < 0 || index >= this.size()) {
+ throw new Error(`Tried to read from index ${index}, but array size is: ${this.size()}`);
+ }
+ const tensorWithState = this.tensors[index];
+ if (tensorWithState.cleared) {
+ throw new Error(`TensorArray ${this.name}: Could not read index ${index} twice because it was cleared after a previous read ` +
+ `(perhaps try setting clear_after_read = false?).`);
+ }
+ if (this.clearAfterRead) {
+ tensorWithState.cleared = true;
+ }
+ tensorWithState.read = true;
+ return tensorWithState.tensor;
+ }
+ /**
+ * Helper method to read multiple tensors from the specified indices.
+ */
+ readMany(indices) {
+ return indices.map(index => this.read(index));
+ }
+ /**
+ * Write value into the index of the TensorArray.
+ * @param index number the index to write to.
+ * @param tensor
+ */
+ write(index, tensor) {
+ if (this.closed_) {
+ throw new Error(`TensorArray ${this.name} has already been closed.`);
+ }
+ if (index < 0 || !this.dynamicSize && index >= this.maxSize) {
+ throw new Error(`Tried to write to index ${index}, but array is not resizeable and size is: ${this.maxSize}`);
+ }
+ const t = this.tensors[index] || {};
+ if (tensor.dtype !== this.dtype) {
+ throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${index},
+ because the value dtype is ${tensor.dtype}, but TensorArray dtype is ${this.dtype}.`);
+ }
+ // Set the element shape on the first write to a tensor array of unknown shape
+ if (this.size() === 0 &&
+ (this.elementShape == null || this.elementShape.length === 0)) {
+ this.elementShape = tensor.shape;
+ }
+ assertShapesMatchAllowUndefinedSize(this.elementShape, tensor.shape, `TensorArray ${this.name}: Could not write to TensorArray index ${index}.`);
+ if (t && t.read) {
+ throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${index}, because it has already been read.`);
+ }
+ if (t && t.written) {
+ throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${index}, because it has already been written.`);
+ }
+ t.tensor = tensor;
+ t.written = true;
+ this.tensors[index] = t;
+ }
+ /**
+ * Helper method to write multiple tensors to the specified indices.
+ */
+ writeMany(indices, tensors) {
+ if (indices.length !== tensors.length) {
+ throw new Error(`TensorArray ${this.name}: could not write multiple tensors, ` +
+ `because the index size: ${indices.length} is not the same as tensors size: ${tensors.length}.`);
+ }
+ indices.forEach((i, index) => this.write(i, tensors[index]));
+ }
+ /**
+ * Return selected values in the TensorArray as a packed Tensor. All of
+ * selected values must have been written and their shapes must all match.
+ * @param [indices] number[] Optional. Taking values in [0, max_value). If the
+ * TensorArray is not dynamic, max_value=size(). If not specified,
+ * returns all tensors in the original order.
+ * @param [dtype]
+ */
+ gather(indices, dtype) {
+ if (!!dtype && dtype !== this.dtype) {
+ throw new Error(`TensorArray dtype is ${this.dtype} but gather requested dtype ${dtype}`);
+ }
+ if (!indices) {
+ indices = [];
+ for (let i = 0; i < this.size(); i++) {
+ indices.push(i);
+ }
+ }
+ else {
+ indices = indices.slice(0, this.size());
+ }
+ if (indices.length === 0) {
+ return Object(dist["tensor"])([], [0].concat(this.elementShape));
+ }
+ // Read all the PersistentTensors into a vector to keep track of
+ // their memory.
+ const tensors = this.readMany(indices);
+ assertShapesMatchAllowUndefinedSize(this.elementShape, tensors[0].shape, 'TensorArray shape mismatch: ');
+ return Object(dist["stack"])(tensors, 0);
+ }
+ /**
+ * Return the values in the TensorArray as a concatenated Tensor.
+ */
+ concat(dtype) {
+ if (!!dtype && dtype !== this.dtype) {
+ throw new Error(`TensorArray dtype is ${this.dtype} but concat requested dtype ${dtype}`);
+ }
+ if (this.size() === 0) {
+ return Object(dist["tensor"])([], [0].concat(this.elementShape));
+ }
+ const indices = [];
+ for (let i = 0; i < this.size(); i++) {
+ indices.push(i);
+ }
+ // Collect all the tensors from the tensors array.
+ const tensors = this.readMany(indices);
+ assertShapesMatchAllowUndefinedSize(this.elementShape, tensors[0].shape, `TensorArray shape mismatch: tensor array shape (${this.elementShape}) vs first tensor shape (${tensors[0].shape})`);
+ return Object(dist["concat"])(tensors, 0);
+ }
+ /**
+ * Scatter the values of a Tensor in specific indices of a TensorArray.
+ * @param indices number[] values in [0, max_value). If the
+ * TensorArray is not dynamic, max_value=size().
+ * @param tensor Tensor input tensor.
+ */
+ scatter(indices, tensor) {
+ if (tensor.dtype !== this.dtype) {
+ throw new Error(`TensorArray dtype is ${this.dtype} but tensor has dtype ${tensor.dtype}`);
+ }
+ if (indices.length !== tensor.shape[0]) {
+ throw new Error(`Expected len(indices) == tensor.shape[0], but saw: ${indices.length} vs. ${tensor.shape[0]}`);
+ }
+ const maxIndex = Math.max(...indices);
+ if (!this.dynamicSize && maxIndex >= this.maxSize) {
+ throw new Error(`Max index must be < array size (${maxIndex} vs. ${this.maxSize})`);
+ }
+ this.writeMany(indices, Object(dist["unstack"])(tensor, 0));
+ }
+ /**
+ * Split the values of a Tensor into the TensorArray.
+ * @param length number[] with the lengths to use when splitting value along
+ * its first dimension.
+ * @param tensor Tensor, the tensor to split.
+ */
+ split(length, tensor) {
+ if (tensor.dtype !== this.dtype) {
+ throw new Error(`TensorArray dtype is ${this.dtype} but tensor has dtype ${tensor.dtype}`);
+ }
+ let totalLength = 0;
+ const cumulativeLengths = length.map(len => {
+ totalLength += len;
+ return totalLength;
+ });
+ if (totalLength !== tensor.shape[0]) {
+ throw new Error(`Expected sum of lengths to be equal to
+ tensor.shape[0], but sum of lengths is
+ ${totalLength}, and tensor's shape is: ${tensor.shape}`);
+ }
+ if (!this.dynamicSize && length.length !== this.maxSize) {
+ throw new Error(`TensorArray's size is not equal to the size of lengths (${this.maxSize} vs. ${length.length}), ` +
+ 'and the TensorArray is not marked as dynamically resizeable');
+ }
+ const elementPerRow = totalLength === 0 ? 0 : tensor.size / totalLength;
+ const tensors = [];
+ Object(dist["tidy"])(() => {
+ tensor = tensor.reshape([1, totalLength, elementPerRow]);
+ for (let i = 0; i < length.length; ++i) {
+ const previousLength = (i === 0) ? 0 : cumulativeLengths[i - 1];
+ const indices = [0, previousLength, 0];
+ const sizes = [1, length[i], elementPerRow];
+ tensors[i] = Object(dist["slice"])(tensor, indices, sizes).reshape(this.elementShape);
+ }
+ return tensors;
+ });
+ const indices = [];
+ for (let i = 0; i < length.length; i++) {
+ indices[i] = i;
+ }
+ this.writeMany(indices, tensors);
+ }
+}
+tensor_array_TensorArray.nextId = 0;
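+// Usage sketch (illustrative): write, then pack or concatenate.
+// Assumes a tfjs tensor1d factory in scope; names are hypothetical.
+//
+//   const ta = new tensor_array_TensorArray(
+//       'ta', 'float32', 2, [2], false, false, false);
+//   ta.write(0, tensor1d([1, 2]));
+//   ta.write(1, tensor1d([3, 4]));
+//   ta.gather().shape; // [2, 2] -- tensors stacked along a new axis 0
+//   ta.concat().shape; // [4]    -- tensors joined along axis 0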
+//# sourceMappingURL=tensor_array.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/control_executor.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+
+const control_executor_executeOp = async (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'If':
+ case 'StatelessIf': {
+ const thenFunc = Object(utils["b" /* getParamValue */])('thenBranch', node, tensorMap, context);
+ const elseFunc = Object(utils["b" /* getParamValue */])('elseBranch', node, tensorMap, context);
+ const cond = Object(utils["b" /* getParamValue */])('cond', node, tensorMap, context);
+ const args = Object(utils["b" /* getParamValue */])('args', node, tensorMap, context);
+ const condValue = await cond.data();
+ if (condValue[0]) {
+ return context.functionMap[thenFunc].executeFunctionAsync(args);
+ }
+ else {
+ return context.functionMap[elseFunc].executeFunctionAsync(args);
+ }
+ }
+ case 'While':
+ case 'StatelessWhile': {
+ const bodyFunc = Object(utils["b" /* getParamValue */])('body', node, tensorMap, context);
+ const condFunc = Object(utils["b" /* getParamValue */])('cond', node, tensorMap, context);
+ const args = Object(utils["b" /* getParamValue */])('args', node, tensorMap, context);
+ const condTensor = (await context.functionMap[condFunc].executeFunctionAsync(args))[0];
+ let condValue = await condTensor.data();
+ let result = args;
+ while (condValue[0]) {
+ result =
+ await context.functionMap[bodyFunc].executeFunctionAsync(result);
+ const condTensor = (await context.functionMap[condFunc].executeFunctionAsync(result))[0];
+ condValue = await condTensor.data();
+ }
+ return result;
+ }
+ case 'LoopCond':
+ return [
+ Object(utils["b" /* getParamValue */])('pred', node, tensorMap, context).clone()
+ ];
+ case 'Switch': {
+ const pred = Object(utils["b" /* getParamValue */])('pred', node, tensorMap, context);
+ const data = Object(utils["b" /* getParamValue */])('data', node, tensorMap, context);
+ // Output nodes: :0 => false, :1 => true
+ return (await pred.data())[0] ? [undefined, data.clone()] :
+ [data.clone(), undefined];
+ }
+ case 'Merge':
+ const inputName = node.inputNames.find(name => Object(utils["c" /* getTensor */])(name, tensorMap, context) !== undefined);
+ return inputName ? [Object(utils["c" /* getTensor */])(inputName, tensorMap, context).clone()] :
+ undefined;
+ case 'Enter':
+ const frameId = Object(utils["b" /* getParamValue */])('frameName', node, tensorMap, context);
+ const data = Object(utils["b" /* getParamValue */])('tensor', node, tensorMap, context);
+ context.enterFrame(frameId);
+ return [data.clone()];
+ case 'Exit':
+ const tensor = Object(utils["b" /* getParamValue */])('tensor', node, tensorMap, context);
+ context.exitFrame();
+ return [tensor.clone()];
+ case 'NextIteration':
+ const input = Object(utils["b" /* getParamValue */])('tensor', node, tensorMap, context);
+ context.nextIteration();
+ return [input.clone()];
+ case 'TensorArrayV3':
+ const size = Object(utils["b" /* getParamValue */])('size', node, tensorMap, context);
+ const dtype = Object(utils["b" /* getParamValue */])('dtype', node, tensorMap, context);
+ const elementShape = Object(utils["b" /* getParamValue */])('elementShape', node, tensorMap, context);
+ const dynamicSize = Object(utils["b" /* getParamValue */])('dynamicSize', node, tensorMap, context);
+ const clearAfterRead = Object(utils["b" /* getParamValue */])('clearAfterRead', node, tensorMap, context);
+ const identicalElementShapes = Object(utils["b" /* getParamValue */])('identicalElementShapes', node, tensorMap, context);
+ const name = Object(utils["b" /* getParamValue */])('name', node, tensorMap, context);
+ const tensorArray = new tensor_array_TensorArray(name, dtype, size, elementShape, identicalElementShapes, dynamicSize, clearAfterRead);
+ context.addTensorArray(tensorArray);
+ return [Object(dist["scalar"])(tensorArray.id), Object(dist["scalar"])(1.0)];
+ case 'TensorArrayWriteV3':
+ const id = Object(utils["b" /* getParamValue */])('tensorArrayId', node, tensorMap, context);
+ const index = Object(utils["b" /* getParamValue */])('index', node, tensorMap, context);
+ const writeTensor = Object(utils["b" /* getParamValue */])('tensor', node, tensorMap, context);
+ const writeTensorArray = context.getTensorArray(id);
+ writeTensorArray.write(index, writeTensor);
+ return [Object(dist["scalar"])(1.0)];
+ case 'TensorArrayReadV3':
+ const readId = Object(utils["b" /* getParamValue */])('tensorArrayId', node, tensorMap, context);
+ const readIndex = Object(utils["b" /* getParamValue */])('index', node, tensorMap, context);
+ const readTensorArray = context.getTensorArray(readId);
+ return [readTensorArray.read(readIndex)];
+ case 'TensorArrayGatherV3':
+ const gatherId = Object(utils["b" /* getParamValue */])('tensorArrayId', node, tensorMap, context);
+ const gatherIndices = Object(utils["b" /* getParamValue */])('indices', node, tensorMap, context);
+ const gatherDtype = Object(utils["b" /* getParamValue */])('dtype', node, tensorMap, context);
+ const gatherTensorArray = context.getTensorArray(gatherId);
+ return [gatherTensorArray.gather(gatherIndices, gatherDtype)];
+ case 'TensorArrayScatterV3':
+ const scatterId = Object(utils["b" /* getParamValue */])('tensorArrayId', node, tensorMap, context);
+ const scatterIndices = Object(utils["b" /* getParamValue */])('indices', node, tensorMap, context);
+ const scatterTensor = Object(utils["b" /* getParamValue */])('tensor', node, tensorMap, context);
+ const scatterTensorArray = context.getTensorArray(scatterId);
+ scatterTensorArray.scatter(scatterIndices, scatterTensor);
+ return [Object(dist["scalar"])(1.0)];
+ case 'TensorArrayConcatV3':
+ const concatId = Object(utils["b" /* getParamValue */])('tensorArrayId', node, tensorMap, context);
+ const concatTensorArray = context.getTensorArray(concatId);
+ const concatDtype = Object(utils["b" /* getParamValue */])('dtype', node, tensorMap, context);
+ return [concatTensorArray.concat(concatDtype)];
+ case 'TensorArraySplitV3':
+ const splitId = Object(utils["b" /* getParamValue */])('tensorArrayId', node, tensorMap, context);
+ const splitTensor = Object(utils["b" /* getParamValue */])('tensor', node, tensorMap, context);
+ const lengths = Object(utils["b" /* getParamValue */])('lengths', node, tensorMap, context);
+ const splitTensorArray = context.getTensorArray(splitId);
+ splitTensorArray.split(lengths, splitTensor);
+ return [Object(dist["scalar"])(1.0)];
+ case 'TensorArraySizeV3':
+ const sizeId = Object(utils["b" /* getParamValue */])('tensorArrayId', node, tensorMap, context);
+ const sizeTensorArray = context.getTensorArray(sizeId);
+ return [Object(dist["scalar"])(sizeTensorArray.size(), 'int32')];
+ case 'TensorArrayCloseV3':
+ const closeId = Object(utils["b" /* getParamValue */])('tensorArrayId', node, tensorMap, context);
+ const closeTensorArray = context.getTensorArray(closeId);
+ closeTensorArray.clearAndClose();
+ return [Object(dist["scalar"])(0)];
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const control_executor_CATEGORY = 'control';
+//# sourceMappingURL=control_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/convolution_executor.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const convolution_executor_executeOp = (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'Conv1D': {
+ const stride = Object(utils["b" /* getParamValue */])('stride', node, tensorMap, context);
+ const pad = Object(utils["b" /* getParamValue */])('pad', node, tensorMap, context);
+ const dataFormat = Object(utils["b" /* getParamValue */])('dataFormat', node, tensorMap, context)
+ .toUpperCase();
+ const dilation = Object(utils["b" /* getParamValue */])('dilation', node, tensorMap, context);
+ return [dist["conv1d"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('filter', node, tensorMap, context), stride, pad, dataFormat, dilation)];
+ }
+ case 'Conv2D': {
+ const stride = Object(utils["b" /* getParamValue */])('strides', node, tensorMap, context);
+ const pad = Object(utils["b" /* getParamValue */])('pad', node, tensorMap, context);
+ const dataFormat = Object(utils["b" /* getParamValue */])('dataFormat', node, tensorMap, context)
+ .toUpperCase();
+ const dilations = Object(utils["b" /* getParamValue */])('dilations', node, tensorMap, context);
+ return [dist["conv2d"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('filter', node, tensorMap, context), [stride[1], stride[2]], pad, dataFormat, [dilations[1], dilations[2]])];
+ }
+ case '_FusedConv2D':
+ case 'FusedDepthwiseConv2dNative': {
+ const [extraOp, activationFunc] = Object(utils["b" /* getParamValue */])('fusedOps', node, tensorMap, context);
+ const isBiasAdd = extraOp === 'biasadd';
+ const isPrelu = activationFunc === 'prelu';
+ const isBatchNorm = extraOp === 'fusedbatchnorm';
+ const numArgs = Object(utils["b" /* getParamValue */])('numArgs', node, tensorMap, context);
+ if (isBiasAdd) {
+ if (isPrelu && numArgs !== 2) {
+ throw new Error('FusedConv2d and DepthwiseConv2d with BiasAdd and Prelu ' +
+ 'must have two extra arguments: bias and alpha.');
+ }
+ if (!isPrelu && numArgs !== 1) {
+ throw new Error('FusedConv2d and DepthwiseConv2d with BiasAdd must have ' +
+ 'one extra argument: bias.');
+ }
+ }
+ if (isBatchNorm) {
+ throw new Error('FusedConv2d and DepthwiseConv2d with FusedBatchNorm is not supported.');
+ }
+ const stride = Object(utils["b" /* getParamValue */])('strides', node, tensorMap, context);
+ const pad = Object(utils["b" /* getParamValue */])('pad', node, tensorMap, context);
+ const dataFormat = Object(utils["b" /* getParamValue */])('dataFormat', node, tensorMap, context)
+ .toUpperCase();
+ const dilations = Object(utils["b" /* getParamValue */])('dilations', node, tensorMap, context);
+ const [biasArg, preluArg] = Object(utils["b" /* getParamValue */])('args', node, tensorMap, context);
+ const kernelMethod = node.op === '_FusedConv2D' ?
+ dist["fused"].conv2d :
+ dist["fused"].depthwiseConv2d;
+ return [kernelMethod({
+ x: Object(utils["b" /* getParamValue */])('x', node, tensorMap, context),
+ filter: Object(utils["b" /* getParamValue */])('filter', node, tensorMap, context),
+ strides: [stride[1], stride[2]],
+ pad: pad,
+ dataFormat: dataFormat,
+ dilations: [dilations[1], dilations[2]],
+ bias: biasArg,
+ activation: activationFunc,
+ preluActivationWeights: preluArg
+ })];
+ }
+ case 'Conv2DBackpropInput':
+ case 'Conv2dTranspose': {
+ const shape = Object(utils["b" /* getParamValue */])('outputShape', node, tensorMap, context);
+ const stride = Object(utils["b" /* getParamValue */])('strides', node, tensorMap, context);
+ const pad = Object(utils["b" /* getParamValue */])('pad', node, tensorMap, context);
+ return [dist["conv2dTranspose"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('filter', node, tensorMap, context), shape, [stride[1], stride[2]], pad)];
+ }
+ case 'DepthwiseConv2dNative':
+ case 'DepthwiseConv2d': {
+ const stride = Object(utils["b" /* getParamValue */])('strides', node, tensorMap, context);
+ const pad = Object(utils["b" /* getParamValue */])('pad', node, tensorMap, context);
+ const dilations = Object(utils["b" /* getParamValue */])('dilations', node, tensorMap, context);
+ const dataFormat = Object(utils["b" /* getParamValue */])('dataFormat', node, tensorMap, context)
+ .toUpperCase();
+ return [dist["depthwiseConv2d"](Object(utils["b" /* getParamValue */])('input', node, tensorMap, context), Object(utils["b" /* getParamValue */])('filter', node, tensorMap, context), [stride[1], stride[2]], pad, dataFormat, [dilations[1], dilations[2]])];
+ }
+ case 'Conv3D': {
+ const stride = Object(utils["b" /* getParamValue */])('strides', node, tensorMap, context);
+ const pad = Object(utils["b" /* getParamValue */])('pad', node, tensorMap, context);
+ const dataFormat = Object(utils["b" /* getParamValue */])('dataFormat', node, tensorMap, context)
+ .toUpperCase();
+ const dilations = Object(utils["b" /* getParamValue */])('dilations', node, tensorMap, context);
+ return [dist["conv3d"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('filter', node, tensorMap, context), [stride[1], stride[2], stride[3]], pad, dataFormat, [dilations[1], dilations[2], dilations[3]])];
+ }
+ case 'AvgPool': {
+ const stride = Object(utils["b" /* getParamValue */])('strides', node, tensorMap, context);
+ const pad = Object(utils["b" /* getParamValue */])('pad', node, tensorMap, context);
+ const kernelSize = Object(utils["b" /* getParamValue */])('kernelSize', node, tensorMap, context);
+ return [dist["avgPool"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), [kernelSize[1], kernelSize[2]], [stride[1], stride[2]], pad)];
+ }
+ case 'MaxPool': {
+ const stride = Object(utils["b" /* getParamValue */])('strides', node, tensorMap, context);
+ const pad = Object(utils["b" /* getParamValue */])('pad', node, tensorMap, context);
+ const kernelSize = Object(utils["b" /* getParamValue */])('kernelSize', node, tensorMap, context);
+ return [dist["maxPool"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), [kernelSize[1], kernelSize[2]], [stride[1], stride[2]], pad)];
+ }
+ case 'MaxPoolWithArgmax': {
+ const stride = Object(utils["b" /* getParamValue */])('strides', node, tensorMap, context);
+ const pad = Object(utils["b" /* getParamValue */])('pad', node, tensorMap, context);
+ const kernelSize = Object(utils["b" /* getParamValue */])('kernelSize', node, tensorMap, context);
+ const includeBatchInIndex = Object(utils["b" /* getParamValue */])('includeBatchInIndex', node, tensorMap, context);
+ const { result, indexes } = dist["maxPoolWithArgmax"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), [kernelSize[1], kernelSize[2]], [stride[1], stride[2]], pad, includeBatchInIndex);
+ return [result, indexes];
+ }
+ case 'AvgPool3D': {
+ const stride = Object(utils["b" /* getParamValue */])('strides', node, tensorMap, context);
+ const pad = Object(utils["b" /* getParamValue */])('pad', node, tensorMap, context);
+ const kernelSize = Object(utils["b" /* getParamValue */])('kernelSize', node, tensorMap, context);
+ return [dist["avgPool3d"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), [kernelSize[1], kernelSize[2], kernelSize[3]], [stride[1], stride[2], stride[3]], pad)];
+ }
+ case 'MaxPool3D': {
+ const stride = Object(utils["b" /* getParamValue */])('strides', node, tensorMap, context);
+ const pad = Object(utils["b" /* getParamValue */])('pad', node, tensorMap, context);
+ const kernelSize = Object(utils["b" /* getParamValue */])('kernelSize', node, tensorMap, context);
+ return [dist["maxPool3d"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), [kernelSize[1], kernelSize[2], kernelSize[3]], [stride[1], stride[2], stride[3]], pad)];
+ }
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const convolution_executor_CATEGORY = 'convolution';
+//# sourceMappingURL=convolution_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/creation_executor.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const creation_executor_executeOp = (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'Fill': {
+ const shape = Object(utils["b" /* getParamValue */])('shape', node, tensorMap, context);
+ const dtype = Object(utils["b" /* getParamValue */])('dtype', node, tensorMap, context);
+ const value = Object(utils["b" /* getParamValue */])('value', node, tensorMap, context);
+ return [dist["fill"](shape, value, dtype)];
+ }
+ case 'LinSpace': {
+ const start = Object(utils["b" /* getParamValue */])('start', node, tensorMap, context);
+ const stop = Object(utils["b" /* getParamValue */])('stop', node, tensorMap, context);
+ const num = Object(utils["b" /* getParamValue */])('num', node, tensorMap, context);
+ return [dist["linspace"](start, stop, num)];
+ }
+ case 'Multinomial': {
+ const logits = Object(utils["b" /* getParamValue */])('logits', node, tensorMap, context);
+ const numSamples = Object(utils["b" /* getParamValue */])('numSamples', node, tensorMap, context);
+ const seed = Object(utils["b" /* getParamValue */])('seed', node, tensorMap, context);
+ return [dist["multinomial"](logits, numSamples, seed)];
+ }
+ case 'OneHot': {
+ const indices = Object(utils["b" /* getParamValue */])('indices', node, tensorMap, context);
+ const depth = Object(utils["b" /* getParamValue */])('depth', node, tensorMap, context);
+ const onValue = Object(utils["b" /* getParamValue */])('onValue', node, tensorMap, context);
+ const offValue = Object(utils["b" /* getParamValue */])('offValue', node, tensorMap, context);
+ return [dist["oneHot"](indices, depth, onValue, offValue)];
+ }
+ case 'Ones': {
+ return [dist["ones"](Object(utils["b" /* getParamValue */])('shape', node, tensorMap, context), Object(utils["b" /* getParamValue */])('dtype', node, tensorMap, context))];
+ }
+ case 'OnesLike': {
+ return [dist["onesLike"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'RandomUniform': {
+ return [dist["randomUniform"](
+ // tslint:disable-next-line:no-any
+ Object(utils["b" /* getParamValue */])('shape', node, tensorMap, context), Object(utils["b" /* getParamValue */])('minval', node, tensorMap, context), Object(utils["b" /* getParamValue */])('maxval', node, tensorMap, context), Object(utils["b" /* getParamValue */])('dtype', node, tensorMap, context))];
+ }
+ case 'Range': {
+ const start = Object(utils["b" /* getParamValue */])('start', node, tensorMap, context);
+ const stop = Object(utils["b" /* getParamValue */])('stop', node, tensorMap, context);
+ const step = Object(utils["b" /* getParamValue */])('step', node, tensorMap, context);
+ return [dist["range"](start, stop, step, Object(utils["b" /* getParamValue */])('dtype', node, tensorMap, context))];
+ }
+ case 'TruncatedNormal': {
+ const shape = Object(utils["b" /* getParamValue */])('shape', node, tensorMap, context);
+ const mean = Object(utils["b" /* getParamValue */])('mean', node, tensorMap, context);
+ const stdDev = Object(utils["b" /* getParamValue */])('stdDev', node, tensorMap, context);
+ const seed = Object(utils["b" /* getParamValue */])('seed', node, tensorMap, context);
+ return [dist["truncatedNormal"](shape, mean, stdDev, Object(utils["b" /* getParamValue */])('dtype', node, tensorMap, context), seed)];
+ }
+ case 'Zeros': {
+ return [dist["zeros"](Object(utils["b" /* getParamValue */])('shape', node, tensorMap, context), Object(utils["b" /* getParamValue */])('dtype', node, tensorMap, context))];
+ }
+ case 'ZerosLike': {
+ return [dist["zerosLike"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const creation_executor_CATEGORY = 'creation';
+//# sourceMappingURL=creation_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/dynamic_executor.js
+/**
+ * @license
+ * Copyright 2018 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const dynamic_executor_executeOp = async (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'NonMaxSuppressionV5':
+ case 'NonMaxSuppressionV3':
+ case 'NonMaxSuppressionV2': {
+ const boxes = Object(utils["b" /* getParamValue */])('boxes', node, tensorMap, context);
+ const scores = Object(utils["b" /* getParamValue */])('scores', node, tensorMap, context);
+ const maxOutputSize = Object(utils["b" /* getParamValue */])('maxOutputSize', node, tensorMap, context);
+ const iouThreshold = Object(utils["b" /* getParamValue */])('iouThreshold', node, tensorMap, context);
+ const scoreThreshold = Object(utils["b" /* getParamValue */])('scoreThreshold', node, tensorMap, context);
+ if (node.op === 'NonMaxSuppressionV5') {
+ const softNmsSigma = Object(utils["b" /* getParamValue */])('softNmsSigma', node, tensorMap, context);
+ const result = await dist["image"].nonMaxSuppressionWithScoreAsync(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma);
+ return [result.selectedIndices, result.selectedScores];
+ }
+ return [await dist["image"].nonMaxSuppressionAsync(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold)];
+ }
+ case 'Where': {
+ const condition = Object(utils["b" /* getParamValue */])('condition', node, tensorMap, context)
+ .asType('bool');
+ const result = [await dist["whereAsync"](condition)];
+ condition.dispose();
+ return result;
+ }
+ case 'ListDiff': {
+ return dist["setdiff1dAsync"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('y', node, tensorMap, context));
+ }
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const dynamic_executor_CATEGORY = 'dynamic';
+//# sourceMappingURL=dynamic_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/evaluation_executor.js
+/**
+ * @license
+ * Copyright 2018 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const evaluation_executor_executeOp = (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'TopKV2': {
+ const x = Object(utils["b" /* getParamValue */])('x', node, tensorMap, context);
+ const k = Object(utils["b" /* getParamValue */])('k', node, tensorMap, context);
+ const sorted = Object(utils["b" /* getParamValue */])('sorted', node, tensorMap, context);
+ const result = dist["topk"](x, k, sorted);
+ return [result.values, result.indices];
+ }
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const evaluation_executor_CATEGORY = 'evaluation';
+//# sourceMappingURL=evaluation_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/graph_executor.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const graph_executor_executeOp = (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'Const': {
+ return tensorMap[node.name];
+ }
+ case 'PlaceholderWithDefault':
+ const def = Object(utils["b" /* getParamValue */])('default', node, tensorMap, context);
+ return [Object(utils["c" /* getTensor */])(node.name, tensorMap, context) || def];
+ case 'Placeholder':
+ return [Object(utils["c" /* getTensor */])(node.name, tensorMap, context)];
+ case 'Identity':
+ case 'StopGradient':
+ case 'FakeQuantWithMinMaxVars': // This op is currently ignored.
+ return [
+ Object(utils["b" /* getParamValue */])('x', node, tensorMap, context).clone()
+ ];
+ case 'IdentityN':
+ return Object(utils["b" /* getParamValue */])('x', node, tensorMap, context)
+ .map((t) => t.clone());
+ case 'Snapshot':
+ const snapshot = Object(utils["b" /* getParamValue */])('x', node, tensorMap, context);
+ return [snapshot.clone()];
+ case 'Shape':
+ return [dist["tensor1d"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context).shape, 'int32')];
+ case 'ShapeN':
+ return Object(utils["b" /* getParamValue */])('x', node, tensorMap, context)
+ .map((t) => dist["tensor1d"](t.shape));
+ case 'Size':
+ return [dist["scalar"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context).size, 'int32')];
+ case 'Rank':
+ return [dist["scalar"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context).rank, 'int32')];
+ case 'NoOp':
+ return [dist["scalar"](1)];
+ case 'Print':
+ const input = Object(utils["b" /* getParamValue */])('x', node, tensorMap, context);
+ const data = Object(utils["b" /* getParamValue */])('data', node, tensorMap, context);
+ const message = Object(utils["b" /* getParamValue */])('message', node, tensorMap, context);
+ const summarize = Object(utils["b" /* getParamValue */])('summarize', node, tensorMap, context);
+ console.warn('The graph has a tf.print() operation, ' +
+ 'usually used for debugging, which slows down performance.');
+ console.log(message);
+ for (let i = 0; i < data.length; i++) {
+ console.log(Array.prototype.slice.call(data[i].dataSync()).slice(0, summarize));
+ }
+ return [input];
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const graph_executor_CATEGORY = 'graph';
+//# sourceMappingURL=graph_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/image_executor.js
+/**
+ * @license
+ * Copyright 2018 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const image_executor_executeOp = (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'ResizeBilinear': {
+ const images = Object(utils["b" /* getParamValue */])('images', node, tensorMap, context);
+ const size = Object(utils["b" /* getParamValue */])('size', node, tensorMap, context);
+ const alignCorners = Object(utils["b" /* getParamValue */])('alignCorners', node, tensorMap, context);
+ return [dist["image"].resizeBilinear(images, [size[0], size[1]], alignCorners)];
+ }
+ case 'ResizeNearestNeighbor': {
+ const images = Object(utils["b" /* getParamValue */])('images', node, tensorMap, context);
+ const size = Object(utils["b" /* getParamValue */])('size', node, tensorMap, context);
+ const alignCorners = Object(utils["b" /* getParamValue */])('alignCorners', node, tensorMap, context);
+ return [dist["image"].resizeNearestNeighbor(images, [size[0], size[1]], alignCorners)];
+ }
+ case 'CropAndResize': {
+ const image = Object(utils["b" /* getParamValue */])('image', node, tensorMap, context);
+ const boxes = Object(utils["b" /* getParamValue */])('boxes', node, tensorMap, context);
+ const boxInd = Object(utils["b" /* getParamValue */])('boxInd', node, tensorMap, context);
+ const cropSize = Object(utils["b" /* getParamValue */])('cropSize', node, tensorMap, context);
+ const method = Object(utils["b" /* getParamValue */])('method', node, tensorMap, context);
+ const extrapolationValue = Object(utils["b" /* getParamValue */])('extrapolationValue', node, tensorMap, context);
+ return [dist["image"].cropAndResize(image, boxes, boxInd, cropSize, method, extrapolationValue)];
+ }
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const image_executor_CATEGORY = 'image';
+//# sourceMappingURL=image_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/logical_executor.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const logical_executor_executeOp = (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'Equal': {
+ return [dist["equal"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'NotEqual': {
+ return [dist["notEqual"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'Greater': {
+ return [dist["greater"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'GreaterEqual': {
+ return [dist["greaterEqual"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'Less': {
+ return [dist["less"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'LessEqual': {
+ return [dist["lessEqual"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'LogicalAnd': {
+ return [dist["logicalAnd"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'LogicalNot': {
+ return [dist["logicalNot"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context))];
+ }
+ case 'LogicalOr': {
+ return [dist["logicalOr"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ case 'Select':
+ case 'SelectV2': {
+ return [dist["where"](Object(utils["b" /* getParamValue */])('condition', node, tensorMap, context), Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context))];
+ }
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const logical_executor_CATEGORY = 'logical';
+//# sourceMappingURL=logical_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/matrices_executor.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const matrices_executor_executeOp = (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'BatchMatMul':
+ case 'BatchMatMulV2':
+ case 'MatMul':
+ return [dist["matMul"](Object(utils["b" /* getParamValue */])('a', node, tensorMap, context), Object(utils["b" /* getParamValue */])('b', node, tensorMap, context), Object(utils["b" /* getParamValue */])('transposeA', node, tensorMap, context), Object(utils["b" /* getParamValue */])('transposeB', node, tensorMap, context))];
+ case 'Transpose':
+ return [dist["transpose"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('perm', node, tensorMap, context))];
+ case '_FusedMatMul':
+ const [extraOp, activationFunc] = Object(utils["b" /* getParamValue */])('fusedOps', node, tensorMap, context);
+ const isBiasAdd = extraOp === 'biasadd';
+ const isPrelu = activationFunc === 'prelu';
+ const numArgs = Object(utils["b" /* getParamValue */])('numArgs', node, tensorMap, context);
+ if (isBiasAdd) {
+ if (isPrelu && numArgs !== 2) {
+ throw new Error('Fused MatMul with BiasAdd and Prelu must have two ' +
+ 'extra arguments: bias and alpha.');
+ }
+ if (!isPrelu && numArgs !== 1) {
+ throw new Error('Fused MatMul with BiasAdd must have one extra argument: bias.');
+ }
+ }
+ const [biasArg, preluArg] = Object(utils["b" /* getParamValue */])('args', node, tensorMap, context);
+ return [dist["fused"].matMul({
+ a: Object(utils["b" /* getParamValue */])('a', node, tensorMap, context),
+ b: Object(utils["b" /* getParamValue */])('b', node, tensorMap, context),
+ transposeA: Object(utils["b" /* getParamValue */])('transposeA', node, tensorMap, context),
+ transposeB: Object(utils["b" /* getParamValue */])('transposeB', node, tensorMap, context),
+ bias: biasArg,
+ activation: activationFunc,
+ preluActivationWeights: preluArg
+ })];
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const matrices_executor_CATEGORY = 'matrices';
+//# sourceMappingURL=matrices_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/normalization_executor.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const normalization_executor_executeOp = (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'FusedBatchNorm':
+ case 'FusedBatchNormV2': {
+ return [dist["batchNorm"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('mean', node, tensorMap, context), Object(utils["b" /* getParamValue */])('variance', node, tensorMap, context), Object(utils["b" /* getParamValue */])('offset', node, tensorMap, context), Object(utils["b" /* getParamValue */])('scale', node, tensorMap, context), Object(utils["b" /* getParamValue */])('epsilon', node, tensorMap, context))];
+ }
+ case 'FusedBatchNormV3': {
+ return [dist["batchNorm"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('mean', node, tensorMap, context), Object(utils["b" /* getParamValue */])('variance', node, tensorMap, context), Object(utils["b" /* getParamValue */])('offset', node, tensorMap, context), Object(utils["b" /* getParamValue */])('scale', node, tensorMap, context), Object(utils["b" /* getParamValue */])('epsilon', node, tensorMap, context))];
+ }
+ case 'LRN': {
+ return [dist["localResponseNormalization"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('radius', node, tensorMap, context), Object(utils["b" /* getParamValue */])('bias', node, tensorMap, context), Object(utils["b" /* getParamValue */])('alpha', node, tensorMap, context), Object(utils["b" /* getParamValue */])('beta', node, tensorMap, context))];
+ }
+ case 'Softmax': {
+ return [dist["softmax"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'LogSoftmax': {
+ return [dist["logSoftmax"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'SparseToDense': {
+ return [dist["sparseToDense"](Object(utils["b" /* getParamValue */])('sparseIndices', node, tensorMap, context), Object(utils["b" /* getParamValue */])('outputShape', node, tensorMap, context), Object(utils["b" /* getParamValue */])('sparseValues', node, tensorMap, context), Object(utils["b" /* getParamValue */])('defaultValue', node, tensorMap, context))];
+ }
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const normalization_executor_CATEGORY = 'normalization';
+//# sourceMappingURL=normalization_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/reduction_executor.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const reduction_executor_executeOp = (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'Max': {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ const keepDims = Object(utils["b" /* getParamValue */])('keepDims', node, tensorMap, context);
+ return [dist["max"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), axis, keepDims)];
+ }
+ case 'Mean': {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ const keepDims = Object(utils["b" /* getParamValue */])('keepDims', node, tensorMap, context);
+ return [dist["mean"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), axis, keepDims)];
+ }
+ case 'Min': {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ const keepDims = Object(utils["b" /* getParamValue */])('keepDims', node, tensorMap, context);
+ return [dist["min"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), axis, keepDims)];
+ }
+ case 'Sum': {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ const keepDims = Object(utils["b" /* getParamValue */])('keepDims', node, tensorMap, context);
+ return [dist["sum"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), axis, keepDims)];
+ }
+ case 'All': {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ const keepDims = Object(utils["b" /* getParamValue */])('keepDims', node, tensorMap, context);
+ return [dist["all"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), axis, keepDims)];
+ }
+ case 'Any': {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ const keepDims = Object(utils["b" /* getParamValue */])('keepDims', node, tensorMap, context);
+ return [dist["any"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), axis, keepDims)];
+ }
+ case 'ArgMax': {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ return [dist["argMax"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), axis)];
+ }
+ case 'ArgMin': {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ return [dist["argMin"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), axis)];
+ }
+ case 'Prod': {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ const keepDims = Object(utils["b" /* getParamValue */])('keepDims', node, tensorMap, context);
+ return [dist["prod"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), axis, keepDims)];
+ }
+ case 'Cumsum': {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ const exclusive = Object(utils["b" /* getParamValue */])('exclusive', node, tensorMap, context);
+ const reverse = Object(utils["b" /* getParamValue */])('reverse', node, tensorMap, context);
+ return [dist["cumsum"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), axis, exclusive, reverse)];
+ }
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const reduction_executor_CATEGORY = 'reduction';
+//# sourceMappingURL=reduction_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/slice_join_executor.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const slice_join_executor_executeOp = (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'ConcatV2':
+ case 'Concat': {
+ const n = Object(utils["b" /* getParamValue */])('n', node, tensorMap, context);
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ let inputs = Object(utils["b" /* getParamValue */])('tensors', node, tensorMap, context);
+ inputs = inputs.slice(0, n);
+ return [dist["concat"](inputs, axis)];
+ }
+ case 'GatherV2':
+ case 'Gather': {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ const input = Object(utils["b" /* getParamValue */])('x', node, tensorMap, context);
+ const indices = Object(utils["b" /* getParamValue */])('indices', node, tensorMap, context);
+ return [dist["gather"](input, indices.asType('int32'), axis)];
+ }
+ case 'ReverseV2':
+ case 'Reverse': {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ const input = Object(utils["b" /* getParamValue */])('x', node, tensorMap, context);
+ return [dist["reverse"](input, axis)];
+ }
+ case 'Slice': {
+ // tslint:disable-next-line:no-any
+ const begin = Object(utils["b" /* getParamValue */])('begin', node, tensorMap, context);
+ // tslint:disable-next-line:no-any
+ const size = Object(utils["b" /* getParamValue */])('size', node, tensorMap, context);
+ return [dist["slice"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), begin, size)];
+ }
+ case 'StridedSlice': {
+ const begin = Object(utils["b" /* getParamValue */])('begin', node, tensorMap, context);
+ const end = Object(utils["b" /* getParamValue */])('end', node, tensorMap, context);
+ const strides = Object(utils["b" /* getParamValue */])('strides', node, tensorMap, context);
+ const beginMask = Object(utils["b" /* getParamValue */])('beginMask', node, tensorMap, context);
+ const endMask = Object(utils["b" /* getParamValue */])('endMask', node, tensorMap, context);
+ const ellipsisMask = Object(utils["b" /* getParamValue */])('ellipsisMask', node, tensorMap, context);
+ const newAxisMask = Object(utils["b" /* getParamValue */])('newAxisMask', node, tensorMap, context);
+ const shrinkAxisMask = Object(utils["b" /* getParamValue */])('shrinkAxisMask', node, tensorMap, context);
+ const tensor = Object(utils["b" /* getParamValue */])('x', node, tensorMap, context);
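+ // If only a single begin value is given for a multi-dimensional tensor,
+ // pad begin/end/strides out to the tensor's full rank so that
+ // stridedSlice receives a per-axis specification.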
+ if (begin.length === 1 && tensor.shape.length > 1) {
+ for (let i = 1; i < tensor.shape.length; i++) {
+ begin.push(0);
+ end.push(tensor.shape[i]);
+ strides.push(strides[0]);
+ }
+ }
+ return [dist["stridedSlice"](tensor, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask)];
+ }
+ case 'Pack': {
+ return dist["tidy"](() => {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ const tensors = Object(utils["b" /* getParamValue */])('tensors', node, tensorMap, context);
+ // Reshape the tensors to the first tensor's shape if they don't match.
+ const shape = tensors[0].shape;
+ const squeezedShape = tensors[0].squeeze().shape;
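+ // Shapes are treated as compatible when they agree after dropping
+ // size-1 dimensions; compatible tensors are reshaped to the first
+ // tensor's shape before stacking.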
+ const mapped = tensors.map(tensor => {
+ const sameShape = dist["util"].arraysEqual(tensor.shape, shape);
+ if (!sameShape &&
+ !dist["util"].arraysEqual(tensor.squeeze().shape, squeezedShape)) {
+ throw new Error('the input tensor shapes do not match');
+ }
+ return sameShape ? tensor : tensor.reshape(shape);
+ });
+ return [dist["stack"](mapped, axis)];
+ });
+ }
+ case 'Unpack': {
+ return dist["tidy"](() => {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ const tensor = Object(utils["b" /* getParamValue */])('tensor', node, tensorMap, context);
+ return dist["unstack"](tensor, axis);
+ });
+ }
+ case 'Tile': {
+ const reps = Object(utils["b" /* getParamValue */])('reps', node, tensorMap, context);
+ return [dist["tile"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), reps)];
+ }
+ case 'Split':
+ case 'SplitV': {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ const numOrSizeSplits = Object(utils["b" /* getParamValue */])('numOrSizeSplits', node, tensorMap, context);
+ return dist["split"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), numOrSizeSplits, axis);
+ }
+ case 'ScatterNd': {
+ const indices = Object(utils["b" /* getParamValue */])('indices', node, tensorMap, context);
+ const values = Object(utils["b" /* getParamValue */])('values', node, tensorMap, context);
+ const shape = Object(utils["b" /* getParamValue */])('shape', node, tensorMap, context);
+ return [dist["scatterND"](indices, values, shape)];
+ }
+ case 'GatherNd': {
+ const x = Object(utils["b" /* getParamValue */])('x', node, tensorMap, context);
+ const indices = Object(utils["b" /* getParamValue */])('indices', node, tensorMap, context);
+ return [dist["gatherND"](x, indices)];
+ }
+ case 'SparseToDense': {
+ const indices = Object(utils["b" /* getParamValue */])('sparseIndices', node, tensorMap, context);
+ const shape = Object(utils["b" /* getParamValue */])('outputShape', node, tensorMap, context);
+ const sparseValues = Object(utils["b" /* getParamValue */])('sparseValues', node, tensorMap, context);
+ const defaultValue = Object(utils["b" /* getParamValue */])('defaultValue', node, tensorMap, context);
+ return [dist["sparseToDense"](indices, sparseValues, shape, sparseValues.dtype === defaultValue.dtype ?
+ defaultValue :
+ defaultValue.asType(sparseValues.dtype))];
+ }
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const slice_join_executor_CATEGORY = 'slice_join';
+//# sourceMappingURL=slice_join_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/spectral_executor.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const spectral_executor_executeOp = (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'FFT': {
+ return [dist["fft"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'IFFT': {
+ return [dist["ifft"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'RFFT': {
+ return [dist["rfft"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ case 'IRFFT': {
+ return [dist["irfft"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context))];
+ }
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const spectral_executor_CATEGORY = 'spectral';
+//# sourceMappingURL=spectral_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/executors/transformation_executor.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const transformation_executor_executeOp = (node, tensorMap, context) => {
+ switch (node.op) {
+ case 'Cast': {
+ return [dist["cast"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('dtype', node, tensorMap, context))];
+ }
+ case 'ExpandDims': {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ return [dist["expandDims"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), axis)];
+ }
+ case 'Squeeze': {
+ const axis = Object(utils["b" /* getParamValue */])('axis', node, tensorMap, context);
+ return [dist["squeeze"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), axis)];
+ }
+ case 'Reshape': {
+ return [dist["reshape"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('shape', node, tensorMap, context))];
+ }
+ case 'PadV2':
+ case 'Pad': {
+ return [dist["pad"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["f" /* split */])(Object(utils["b" /* getParamValue */])('padding', node, tensorMap, context), 2), Object(utils["b" /* getParamValue */])('constantValue', node, tensorMap, context))];
+ }
+ case 'SpaceToBatchND': {
+ const blockShape = Object(utils["b" /* getParamValue */])('blockShape', node, tensorMap, context);
+ const paddings = Object(utils["f" /* split */])(Object(utils["b" /* getParamValue */])('paddings', node, tensorMap, context), 2);
+ return [dist["spaceToBatchND"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), blockShape, paddings)];
+ }
+ case 'BatchToSpaceND': {
+ const blockShape = Object(utils["b" /* getParamValue */])('blockShape', node, tensorMap, context);
+ const crops = Object(utils["f" /* split */])(Object(utils["b" /* getParamValue */])('crops', node, tensorMap, context), 2);
+ return [dist["batchToSpaceND"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), blockShape, crops)];
+ }
+ case 'DepthToSpace': {
+ const blockSize = Object(utils["b" /* getParamValue */])('blockSize', node, tensorMap, context);
+ const dataFormat = Object(utils["b" /* getParamValue */])('dataFormat', node, tensorMap, context).toUpperCase();
+ return [dist["depthToSpace"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), blockSize, dataFormat)];
+ }
+ case 'BroadcastTo': {
+ return [dist["broadcastTo"](Object(utils["b" /* getParamValue */])('x', node, tensorMap, context), Object(utils["b" /* getParamValue */])('shape', node, tensorMap, context))];
+ }
+ default:
+ throw TypeError(`Node type ${node.op} is not implemented`);
+ }
+};
+const transformation_executor_CATEGORY = 'transformation';
+//# sourceMappingURL=transformation_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/operations/operation_executor.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+/**
+ * Executes the op defined by the node object.
+ * @param node the node whose op is to be executed
+ * @param tensorMap contains tensors for executed nodes and weights
+ * @param context the execution context that tracks control-flow state
+ */
+function operation_executor_executeOp(node, tensorMap, context) {
+ const value = ((node, tensorMap, context) => {
+ switch (node.category) {
+ case 'arithmetic':
+ return dist["tidy"](() => executeOp(node, tensorMap, context));
+ case 'basic_math':
+ return dist["tidy"](() => basic_math_executor_executeOp(node, tensorMap, context));
+ case 'control':
+ return control_executor_executeOp(node, tensorMap, context);
+ case 'convolution':
+ return dist["tidy"](() => convolution_executor_executeOp(node, tensorMap, context));
+ case 'creation':
+ return dist["tidy"](() => creation_executor_executeOp(node, tensorMap, context));
+ case 'dynamic':
+ return dynamic_executor_executeOp(node, tensorMap, context);
+ case 'evaluation':
+ return dist["tidy"](() => evaluation_executor_executeOp(node, tensorMap, context));
+ case 'image':
+ return dist["tidy"](() => image_executor_executeOp(node, tensorMap, context));
+ case 'graph':
+ return dist["tidy"](() => graph_executor_executeOp(node, tensorMap, context));
+ case 'logical':
+ return dist["tidy"](() => logical_executor_executeOp(node, tensorMap, context));
+ case 'matrices':
+ return dist["tidy"](() => matrices_executor_executeOp(node, tensorMap, context));
+ case 'normalization':
+ return dist["tidy"](() => normalization_executor_executeOp(node, tensorMap, context));
+ case 'reduction':
+ return dist["tidy"](() => reduction_executor_executeOp(node, tensorMap, context));
+ case 'slice_join':
+ return dist["tidy"](() => slice_join_executor_executeOp(node, tensorMap, context));
+ case 'spectral':
+ return dist["tidy"](() => spectral_executor_executeOp(node, tensorMap, context));
+ case 'transformation':
+ return dist["tidy"](() => transformation_executor_executeOp(node, tensorMap, context));
+ case 'custom':
+ const opMapper = Object(register["b" /* getRegisteredOp */])(node.op);
+ if (opMapper && opMapper.customExecutor) {
+ return opMapper.customExecutor(new node_value_impl_NodeValueImpl(node, tensorMap, context));
+ }
+ else {
+ throw TypeError(`Custom op ${node.op} is not registered.`);
+ }
+ default:
+ throw TypeError(`Unknown op '${node.op}'. File an issue at ` +
+ `https://github.com/tensorflow/tfjs/issues so we can add it` +
+ `, or register a custom execution with tf.registerOp()`);
+ }
+ })(node, tensorMap, context);
+ if (value instanceof Promise) {
+ return value.then((data) => [].concat(data));
+ }
+ return [].concat(value);
+}
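+// Illustrative usage (a sketch; `node`, `tensorsMap` and `context` are
+// assumed to come from the surrounding GraphExecutor machinery): sync
+// categories resolve to a Tensor[], while control/dynamic ops resolve to a
+// Promise, so an async caller normalizes with:
+//
+//   const result = operation_executor_executeOp(node, tensorsMap, context);
+//   const tensors = result instanceof Promise ? await result : result;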
+//# sourceMappingURL=operation_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/executor/execution_context.js
+/**
+ * ExecutionContext captures the runtime environment of the node. It keeps
+ * track of the current frame and iteration for the control flow ops.
+ *
+ * For example, a typical dynamic RNN model may contain loops, for which
+ * TensorFlow will generate graphs with Enter/Exit nodes to control the
+ * current execution frame, and NextIteration nodes for iteration id
+ * increments. For models with branch logic, TensorFlow will generate
+ * Switch/Merge ops.
+ */
+class ExecutionContext {
+ constructor(weightMap, tensorArrayMap, functionMap = {}) {
+ this.weightMap = weightMap;
+ this.tensorArrayMap = tensorArrayMap;
+ this.functionMap = functionMap;
+ this.rootContext = { id: 0, frameName: '', iterationId: 0 };
+ this.contexts = [this.rootContext];
+ this.lastId = 0;
+ this.generateCurrentContextIds();
+ }
+ newFrame(id, frameName) {
+ return { id, frameName, iterationId: 0 };
+ }
+ /**
+ * Set the current context
+ * @param contexts: ExecutionContextInfo[] the current path of execution
+ * frames
+ */
+ set currentContext(contexts) {
+ if (this.contexts !== contexts) {
+ this.contexts = contexts;
+ this.generateCurrentContextIds();
+ }
+ }
+ get currentContext() {
+ return this.contexts;
+ }
+ /**
+ * Returns the current context in string format.
+ */
+ get currentContextId() {
+ return this._currentContextIds[0];
+ }
+ /**
+ * Returns the current context and all parent contexts in string format.
+ * This allows access to the nodes in the current and parent frames.
+ */
+ get currentContextIds() {
+ return this._currentContextIds;
+ }
+ generateCurrentContextIds() {
+ const names = [];
+ for (let i = 0; i < this.contexts.length - 1; i++) {
+ const contexts = this.contexts.slice(0, this.contexts.length - i);
+ names.push(this.contextIdforContexts(contexts));
+ }
+ names.push('');
+ this._currentContextIds = names;
+ }
+ contextIdforContexts(contexts) {
+ return contexts ?
+ contexts
+ .map(context => (context.id === 0 && context.iterationId === 0) ?
+ '' :
+ `${context.frameName}-${context.iterationId}`)
+ .join('/') :
+ '';
+ }
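+ // Illustrative example (assumed values): for contexts
+ // [{id: 0, frameName: '', iterationId: 0},
+ //  {id: 3, frameName: 'while', iterationId: 2}]
+ // the root frame maps to the empty string, so the generated id is
+ // '/while-2'.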
+ /**
+ * Enter a new frame; a new context is pushed onto the current context list.
+ * @param frameId new frame id
+ */
+ enterFrame(frameId) {
+ if (this.contexts) {
+ this.lastId++;
+ this.contexts = this.contexts.slice();
+ this.contexts.push(this.newFrame(this.lastId, frameId));
+ this._currentContextIds.unshift(this.contextIdforContexts(this.contexts));
+ }
+ }
+ /**
+ * Exit the current frame; the last context is removed from the current
+ * context list.
+ */
+ exitFrame() {
+ if (this.contexts && this.contexts.length > 1) {
+ this.contexts = this.contexts.slice();
+ this.contexts.splice(-1);
+ this.currentContextIds.shift();
+ }
+ else {
+ throw new Error('Cannot exit frame, the context is empty');
+ }
+ }
+ /**
+ * Enter the next iteration of a loop; the iteration id of the last
+ * context is increased.
+ */
+ nextIteration() {
+ if (this.contexts && this.contexts.length > 0) {
+ this.contexts = this.contexts.slice();
+ this.lastId++;
+ const context = Object.assign({}, this.contexts[this.contexts.length - 1]);
+ context.iterationId += 1;
+ context.id = this.lastId;
+ this.contexts.splice(-1, 1, context);
+ this._currentContextIds.splice(0, 1, this.contextIdforContexts(this.contexts));
+ }
+ else {
+ throw new Error('Cannot increase frame iteration, the context is empty');
+ }
+ }
+ getWeight(name) {
+ return this.weightMap[name];
+ }
+ addTensorArray(tensorArray) {
+ this.tensorArrayMap[tensorArray.id] = tensorArray;
+ }
+ getTensorArray(id) {
+ return this.tensorArrayMap[id];
+ }
+}
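+// A minimal lifecycle sketch (illustrative; the weight and tensor-array
+// maps are assumed empty):
+//
+//   const ctx = new ExecutionContext({}, {});
+//   ctx.enterFrame('while');   // push a 'while' frame, id '/while-0'
+//   ctx.nextIteration();       // bump the iteration, id becomes '/while-1'
+//   ctx.exitFrame();           // pop back to the root context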
+//# sourceMappingURL=execution_context.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/executor/model_analysis.js
+/**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+/**
+ * Given graph inputs and desired outputs, find the minimal set of nodes
+ * to execute in order to compute the outputs. In addition, return other
+ * useful info, such as:
+ * - Missing inputs needed to compute the output.
+ * - Whether the subgraph contains dynamic ops (control flow, dynamic shape).
+ * - Alternative inputs in order to avoid async (dynamic op) execution.
+ */
+function getExecutionSubgraph(inputs, outputs, weightMap) {
+ const usedNodes = new Set();
+ const missingInputs = [];
+ let dynamicNode = null;
+ let syncInputs = null;
+ // Start from the outputs and walk backwards to find all the nodes that
+ // are needed to compute those outputs.
+ const seen = new Set();
+ const inputNodeNames = Object.keys(inputs).map(name => Object(utils["e" /* parseNodeName */])(name)[0]);
+ const frontier = [...outputs];
+ while (frontier.length > 0) {
+ const node = frontier.pop();
+ if (isControlFlow(node) || isDynamicShape(node)) {
+ if (dynamicNode == null) {
+ dynamicNode = node;
+ syncInputs = dynamicNode.children.map(child => child.name)
+ .filter(name => usedNodes.has(name));
+ }
+ }
+ usedNodes.add(node.name);
+ // Weights are a dead end since we already have their values.
+ if (weightMap[node.name] != null) {
+ continue;
+ }
+ // This node is a dead end since it's one of the user-provided inputs.
+ if (inputNodeNames.indexOf(node.name) !== -1) {
+ continue;
+ }
+ if (node.inputs.length === 0) {
+ missingInputs.push(node.name);
+ continue;
+ }
+ node.inputs.forEach(input => {
+ // Don't add to the frontier if it is already there.
+ if (seen.has(input.name)) {
+ return;
+ }
+ seen.add(input.name);
+ frontier.push(input);
+ });
+ }
+ return { inputs, outputs, usedNodes, missingInputs, dynamicNode, syncInputs };
+}
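+// Worked example (hypothetical graph): for out = Add(x, w) with user input
+// x and weight w, the walk marks {out, x, w} as used, stops at x (provided
+// input) and w (weight), and reports no missing inputs and no dynamic node.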
+/**
+ * Given the execution info, return a list of nodes in topological order that
+ * need to be executed to compute the output.
+ */
+function getNodesInTopologicalOrder(graph, weightMap, executionInfo) {
+ const { usedNodes, inputs } = executionInfo;
+ const frontier = [];
+ const inputNodes = Object.keys(inputs)
+ .map(name => Object(utils["e" /* parseNodeName */])(name)[0])
+ .map(name => graph.nodes[name]);
+ inputNodes.forEach(input => {
+ if (usedNodes.has(input.name)) {
+ frontier.push(input);
+ }
+ });
+ graph.weights.forEach(weight => {
+ if (usedNodes.has(weight.name)) {
+ frontier.push(weight);
+ }
+ });
+ const seen = new Set();
+ const orderedNodes = [];
+ while (frontier.length > 0) {
+ const node = frontier.pop();
+ seen.add(node.name);
+ if (!weightMap[node.name]) {
+ orderedNodes.push(node);
+ }
+ node.children.forEach(child => {
+ if (!seen.has(child.name) && usedNodes.has(child.name) &&
+ child.inputs.every(input => seen.has(input.name))) {
+ frontier.push(child);
+ }
+ });
+ }
+ return orderedNodes;
+}
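+// This is effectively a Kahn-style topological sort seeded with the used
+// inputs and weights: a child is only pushed once every one of its inputs
+// has been seen, so each node appears after all of its producers.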
+const CONTROL_FLOW_OPS = [
+ 'Switch', 'Merge', 'Enter', 'Exit', 'NextIteration', 'StatelessIf',
+ 'StatelessWhile'
+];
+const DYNAMIC_SHAPE_OPS = [
+ 'NonMaxSuppressionV2', 'NonMaxSuppressionV3', 'NonMaxSuppressionV5', 'Where'
+];
+function isControlFlow(node) {
+ return CONTROL_FLOW_OPS.indexOf(node.op) >= 0;
+}
+function isDynamicShape(node) {
+ return DYNAMIC_SHAPE_OPS.indexOf(node.op) >= 0;
+}
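+// These ops are dynamic because their output shapes depend on tensor
+// values (e.g. how many boxes survive suppression), which are only known
+// after the data is read back, hence the async execution path above.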
+//# sourceMappingURL=model_analysis.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/executor/graph_executor.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+
+
+
+class graph_executor_GraphExecutor {
+ /**
+ *
+ * @param graph Graph the model or function graph to be executed.
+ * @param parent When building a function executor you need to set the
+ * parent executor. Since the weight and function executor maps are set at
+ * the parent level, the function executor can access the function maps and
+ * weight maps through the parent.
+ */
+ constructor(graph, parent) {
+ this.graph = graph;
+ this.parent = parent;
+ this.compiledMap = new Map();
+ this._weightMap = {};
+ this.SEPERATOR = ',';
+ this._functions = {};
+ this._functionExecutorMap = {};
+ this._outputs = graph.outputs;
+ this._inputs = graph.inputs;
+ this._signature = graph.signature;
+ this._functions = graph.functions;
+ // create sub-graph executors
+ if (graph.functions != null) {
+ Object.keys(graph.functions).forEach(name => {
+ this._functionExecutorMap[name] =
+ new graph_executor_GraphExecutor(graph.functions[name], this);
+ });
+ }
+ }
+ get weightIds() {
+ return this.parent ? this.parent.weightIds : this._weightIds;
+ }
+ get functionExecutorMap() {
+ return this.parent ? this.parent.functionExecutorMap :
+ this._functionExecutorMap;
+ }
+ get weightMap() {
+ return this.parent ? this.parent.weightMap : this._weightMap;
+ }
+ set weightMap(weightMap) {
+ const weightIds = Object.keys(weightMap).map(key => weightMap[key].map(tensor => tensor.id));
+ this._weightIds = [].concat(...weightIds);
+ this._weightMap = weightMap;
+ }
+ get inputs() {
+ return this._inputs.map(node => {
+ return {
+ name: node.name,
+ shape: node.attrParams['shape'] ?
+ node.attrParams['shape'].value :
+ undefined,
+ dtype: node.attrParams['dtype'] ?
+ node.attrParams['dtype'].value :
+ undefined
+ };
+ });
+ }
+ get outputs() {
+ return this._outputs.map(node => {
+ return {
+ name: node.name,
+ shape: node.attrParams['shape'] ?
+ node.attrParams['shape'].value :
+ undefined,
+ dtype: node.attrParams['dtype'] ?
+ node.attrParams['dtype'].value :
+ undefined
+ };
+ });
+ }
+ get inputNodes() {
+ return this._inputs.map(node => node.signatureKey || node.name);
+ }
+ get outputNodes() {
+ return this._outputs.map((node) => {
+ const name = node.signatureKey || node.name;
+ return node.defaultOutput ? (`${name}:${node.defaultOutput}`) : name;
+ });
+ }
+ get functions() {
+ return Object.keys(this._functions).reduce((map, key) => {
+ map[key] = this._functions[key].signature;
+ return map;
+ }, {});
+ }
+ getCompilationKey(inputs, outputs) {
+ const sortedInputs = inputs.map(node => node.name).sort();
+ const sortedOutputs = outputs.map(node => node.name).sort();
+ return sortedInputs.join(this.SEPARATOR) + '--' +
+ sortedOutputs.join(this.SEPARATOR);
+ }
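+ // Illustration (editor's sketch; the node names are hypothetical): for
+ // input nodes named 'b' and 'a' and an output node 'c', the key is
+ //   ['b', 'a'].sort().join(',') + '--' + ['c'].sort().join(',')  // 'a,b--c'
+ // so a given input/output set always maps to the same cached plan.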
+ /**
+ * Compiles the inference graph and returns the minimal set of nodes that are
+ * required for execution, in the correct execution order.
+ */
+ compile(inputs, outputs) {
+ const executionInfo = getExecutionSubgraph(inputs, outputs, this.weightMap);
+ const { missingInputs, dynamicNode, syncInputs } = executionInfo;
+ if (dynamicNode != null) {
+ throw new Error(`This execution contains the node '${dynamicNode.name}', which has ` +
+ `the dynamic op '${dynamicNode.op}'. Please use ` +
+ `model.executeAsync() instead. Alternatively, to avoid the ` +
+ `dynamic ops, specify the inputs [${syncInputs}]`);
+ }
+ if (missingInputs.length > 0) {
+ const outNames = outputs.map(n => n.name);
+ const inNames = Object.keys(inputs);
+ throw new Error(`Cannot compute the outputs [${outNames}] from the provided inputs ` +
+ `[${inNames}]. Missing the following inputs: [${missingInputs}]`);
+ }
+ return getNodesInTopologicalOrder(this.graph, this.weightMap, executionInfo);
+ }
+ /**
+ * Executes the inference for the given input tensors.
+ * @param inputs Tensor map for the model inputs, keyed by the input node
+ * names.
+ * @param outputs Output node names from the TensorFlow model. If no outputs
+ * are specified, the default outputs of the model are used. You can
+ * inspect intermediate nodes of the model by adding them to the outputs
+ * array.
+ */
+ execute(inputs, outputs) {
+ inputs = this.mapInputs(inputs);
+ const names = Object.keys(inputs).sort();
+ this.checkInputs(inputs);
+ this.checkInputShapeAndType(inputs);
+ outputs = this.mapOutputs(outputs);
+ this.checkOutputs(outputs);
+ const inputNodes = names.map(name => this.graph.nodes[Object(utils["e" /* parseNodeName */])(name)[0]]);
+ const outputNodes = outputs.map(name => this.graph.nodes[Object(utils["e" /* parseNodeName */])(name)[0]]);
+ const compilationKey = this.getCompilationKey(inputNodes, outputNodes);
+ // Reuse the cached execution plan if this input/output set was compiled before.
+ let orderedNodes = this.compiledMap.get(compilationKey);
+ if (orderedNodes == null) {
+ orderedNodes = this.compile(inputs, outputNodes);
+ this.compiledMap.set(compilationKey, orderedNodes);
+ }
+ const tensorArrayMap = {};
+ return Object(dist["tidy"])(() => {
+ const context = new ExecutionContext(this.weightMap, tensorArrayMap, this.functionExecutorMap);
+ const tensorsMap = Object.assign({}, this.weightMap);
+ Object.keys(inputs).forEach(name => {
+ const [nodeName, index] = Object(utils["e" /* parseNodeName */])(name);
+ const tensors = [];
+ tensors[index] = inputs[name];
+ tensorsMap[nodeName] = tensors;
+ });
+ const tensorsToKeep = this.getFrozenTensorIds(tensorsMap);
+ const intermediateTensorConsumerCount = {};
+ for (let i = 0; i < orderedNodes.length; i++) {
+ const node = orderedNodes[i];
+ if (!tensorsMap[node.name]) {
+ const tensors = operation_executor_executeOp(node, tensorsMap, context);
+ if (tensors instanceof Promise) {
+ throw new Error(`The execution of the op '${node.op}' returned a promise. ` +
+ `Please use model.executeAsync() instead.`);
+ }
+ tensorsMap[node.name] = tensors;
+ this.checkTensorForDisposal(node.name, node, tensorsMap, context, tensorsToKeep, outputs, intermediateTensorConsumerCount);
+ }
+ }
+ return outputs.map(name => Object(utils["c" /* getTensor */])(name, tensorsMap, context));
+ });
+ }
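+ // Usage sketch (editor's example; the executor, node names and tensor are
+ // hypothetical): for a graph with one input 'x' and one output 'y',
+ //   const [y] = executor.execute({'x': tf.zeros([1, 4])}, ['y']);
+ // This path is synchronous and throws if the graph needs dynamic ops.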
+ getFrozenTensorIds(tensorMap) {
+ const ids = [].concat.apply([], Object.keys(tensorMap)
+ .map(key => tensorMap[key])
+ .map(tensors => tensors.map(tensor => tensor.id)));
+ return new Set(ids);
+ }
+ checkTensorForDisposal(nodeName, node, tensorMap, context, tensorsToKeep, outputNames, intermediateTensorConsumerCount) {
+ // Skip output nodes and any control flow nodes, since their dependencies
+ // are tricky to track correctly.
+ if (node.category === 'control' || outputNames.indexOf(nodeName) !== -1) {
+ return;
+ }
+ tensorMap[nodeName].forEach(tensor => {
+ if (tensor != null) {
+ intermediateTensorConsumerCount[tensor.id] =
+ (intermediateTensorConsumerCount[tensor.id] || 0) +
+ node.children.length;
+ }
+ });
+ node.inputs.forEach(input => {
+ // Skip any control flow nodes, since their dependencies are tricky to
+ // track correctly.
+ if (input.category !== 'control') {
+ const tensors = Object(utils["d" /* getTensorsForCurrentContenxt */])(input.name, tensorMap, context);
+ if (tensors != null) {
+ tensors.forEach(tensor => {
+ if (tensor && !tensorsToKeep.has(tensor.id)) {
+ const count = intermediateTensorConsumerCount[tensor.id];
+ if (count === 1) {
+ tensor.dispose();
+ delete intermediateTensorConsumerCount[tensor.id];
+ }
+ else if (count != null) {
+ // Only intermediate nodes have a consumer count; inputs and
+ // weights do not.
+ intermediateTensorConsumerCount[tensor.id]--;
+ }
+ }
+ });
+ }
+ }
+ });
+ }
+ /**
+ * Executes the inference for the given input tensors asynchronously.
+ * @param inputs Tensor map for the model inputs, keyed by the input node
+ * names.
+ * @param outputs Output node names from the TensorFlow model. If no outputs
+ * are specified, the default outputs of the model are used. You can
+ * inspect intermediate nodes of the model by adding them to the outputs
+ * array.
+ * @param disableWarning whether to disable the no-dynamic-ops warning
+ * message; defaults to false.
+ */
+ async executeAsync(inputs, outputs, disableWarning = false) {
+ inputs = this.mapInputs(inputs);
+ this.checkInputs(inputs);
+ this.checkInputShapeAndType(inputs);
+ outputs = this.mapOutputs(outputs);
+ this.checkOutputs(outputs);
+ const tensorArrayMap = {};
+ const context = new ExecutionContext(this.weightMap, tensorArrayMap, this.functionExecutorMap);
+ // A graph with control flow ops requires runtime evaluation of the
+ // execution order, whereas without control flow the execution order is
+ // pre-determined in the compile method.
+ const tensorMap = await this.executeWithControlFlow(inputs, context, outputs, disableWarning);
+ const results = outputs.map(name => Object(utils["c" /* getTensor */])(name, tensorMap, context));
+ // dispose all the intermediate tensors
+ const outputIds = new Set(results.map(t => t.id));
+ const inputIds = new Set(Object.keys(inputs).map(name => inputs[name].id));
+ Object.keys(tensorMap).forEach(key => {
+ const tensorArray = tensorMap[key];
+ tensorArray.forEach(tensor => {
+ if (tensor && !tensor.isDisposed && !outputIds.has(tensor.id) &&
+ !inputIds.has(tensor.id) &&
+ this.weightIds.indexOf(tensor.id) === -1) {
+ tensor.dispose();
+ }
+ });
+ });
+ return results;
+ }
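+ // Usage sketch (editor's example; names are hypothetical):
+ //   const [y] = await executor.executeAsync({'x': tf.zeros([1, 4])}, ['y']);
+ // Unlike execute(), all intermediate tensors that are not outputs, inputs
+ // or weights are disposed here once the results have been collected.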
+ async executeFunctionAsync(inputs) {
+ const mappedInputs = inputs.reduce((map, tensor, index) => {
+ map[this.inputs[index].name] = tensor;
+ return map;
+ }, {});
+ return this.executeAsync(mappedInputs, this.outputNodes, true);
+ }
+ /**
+ * When there are control flow nodes in the graph, the graph execution uses
+ * ExecutionContext to keep track of the frames and loop iterators.
+ * @param inputs placeholder tensors for the graph.
+ * @param context the execution context object for the current execution.
+ * @param outputNames the names of the requested output nodes.
+ * @param disableWarning whether to disable the no-async-ops warning
+ */
+ async executeWithControlFlow(inputs, context, outputNames, disableWarning) {
+ const names = Object.keys(inputs);
+ const inputNodes = names.map(name => this.graph.nodes[Object(utils["e" /* parseNodeName */])(name)[0]]);
+ const outputNodes = outputNames.map(name => this.graph.nodes[Object(utils["e" /* parseNodeName */])(name)[0]]);
+ const { usedNodes, missingInputs, dynamicNode, syncInputs } = getExecutionSubgraph(inputs, outputNodes, this.weightMap);
+ const stack = [...inputNodes, ...this.graph.weights].map(node => {
+ return { node, contexts: context.currentContext };
+ });
+ const tensorsMap = Object.assign({}, this.weightMap);
+ Object.keys(inputs).forEach(name => {
+ const [nodeName, index] = Object(utils["e" /* parseNodeName */])(name);
+ const tensors = [];
+ tensors[index] = inputs[name];
+ tensorsMap[nodeName] = tensors;
+ });
+ const intermediateTensorConsumerCount = {};
+ const tensorsToKeep = this.getFrozenTensorIds(tensorsMap);
+ const added = {};
+ while (stack.length > 0) {
+ const promises = this.processStack(inputNodes, stack, context, tensorsMap, added, tensorsToKeep, outputNames, intermediateTensorConsumerCount, usedNodes);
+ await Promise.all(promises);
+ }
+ if (dynamicNode == null && !disableWarning) {
+ console.warn(`This model execution did not contain any nodes with control flow ` +
+ `or dynamic output shapes. You can use model.execute() instead.`);
+ }
+ const missingOutputs = outputNodes
+ .filter(node => !isControlFlow(node) &&
+ !Object(utils["c" /* getTensor */])(node.name, tensorsMap, context))
+ .map(node => node.name);
+ if (missingOutputs.length > 0) {
+ let alternativeMsg = '';
+ if (dynamicNode != null) {
+ alternativeMsg =
+ `Alternatively, to avoid the dynamic ops, use model.execute() ` +
+ `and specify the inputs [${syncInputs}]`;
+ }
+ throw new Error(`Cannot compute the outputs [${missingOutputs}] from the provided ` +
+ `inputs [${names}]. Consider providing the following inputs: ` +
+ `[${missingInputs}]. ${alternativeMsg}`);
+ }
+ return tensorsMap;
+ }
+ processStack(inputNodes, stack, context, tensorMap, added, tensorsToKeep, outputNames, intermediateTensorConsumerCount, usedNodes) {
+ const promises = [];
+ while (stack.length > 0) {
+ const item = stack.pop();
+ context.currentContext = item.contexts;
+ let nodeName = '';
+ // The tensor of the Enter op with isConstant set should be set
+ // in the parent scope, so it will be available as a constant for the
+ // whole loop.
+ if (item.node.op === 'Enter' &&
+ Object(utils["b" /* getParamValue */])('isConstant', item.node, tensorMap, context)) {
+ [nodeName] = Object(utils["a" /* getNodeNameAndIndex */])(item.node.name, context);
+ }
+ // only process nodes that are not provided as input nodes.
+ if (inputNodes.indexOf(item.node) === -1) {
+ const tensors = operation_executor_executeOp(item.node, tensorMap, context);
+ if (!nodeName) {
+ [nodeName] = Object(utils["a" /* getNodeNameAndIndex */])(item.node.name, context);
+ }
+ const currentContext = context.currentContext;
+ if (tensors instanceof Promise) {
+ promises.push(tensors.then(t => {
+ tensorMap[nodeName] = t;
+ context.currentContext = currentContext;
+ this.checkTensorForDisposal(nodeName, item.node, tensorMap, context, tensorsToKeep, outputNames, intermediateTensorConsumerCount);
+ this.processChildNodes(item.node, stack, context, tensorMap, added, usedNodes);
+ return t;
+ }));
+ }
+ else {
+ tensorMap[nodeName] = tensors;
+ this.checkTensorForDisposal(nodeName, item.node, tensorMap, context, tensorsToKeep, outputNames, intermediateTensorConsumerCount);
+ this.processChildNodes(item.node, stack, context, tensorMap, added, usedNodes);
+ }
+ }
+ else {
+ this.processChildNodes(item.node, stack, context, tensorMap, added, usedNodes);
+ }
+ }
+ return promises;
+ }
+ processChildNodes(node, stack, context, tensorMap, added, usedNodes) {
+ node.children.forEach((childNode) => {
+ const [nodeName,] = Object(utils["a" /* getNodeNameAndIndex */])(childNode.name, context);
+ if (added[nodeName] || !usedNodes.has(childNode.name)) {
+ return;
+ }
+ // A Merge op can be pushed as soon as any of its inputs has a value.
+ if (childNode.op === 'Merge') {
+ if (childNode.inputNames.some(name => {
+ return !!Object(utils["c" /* getTensor */])(name, tensorMap, context);
+ })) {
+ added[nodeName] = true;
+ stack.push({ contexts: context.currentContext, node: childNode });
+ }
+ }
+ else // Otherwise all inputs must have a value.
+ if (childNode.inputNames.every(name => {
+ return !!Object(utils["c" /* getTensor */])(name, tensorMap, context);
+ })) {
+ added[nodeName] = true;
+ stack.push({ contexts: context.currentContext, node: childNode });
+ }
+ });
+ }
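+ // Editor's note on the scheduling above: a 'Merge' child is pushed onto
+ // the stack as soon as any one of its inputs has a tensor, while every
+ // other op waits until all of its inputs have resolved.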
+ /**
+ * Releases the memory used by the weight tensors.
+ */
+ dispose() {
+ Object.keys(this.weightMap)
+ .forEach(key => this.weightMap[key].forEach(tensor => tensor.dispose()));
+ }
+ checkInputShapeAndType(inputs) {
+ Object.keys(inputs).forEach(name => {
+ const input = inputs[name];
+ const [nodeName,] = Object(utils["e" /* parseNodeName */])(name);
+ const node = this.graph.nodes[nodeName];
+ if (node.attrParams['shape'] && node.attrParams['shape'].value) {
+ const shape = node.attrParams['shape'].value;
+ const match = shape.length === input.shape.length &&
+ input.shape.every((dim, index) => shape[index] === -1 || shape[index] === dim);
+ dist["util"].assert(match, () => `The shape of dict['${node.name}'] provided in ` +
+ `model.execute(dict) must be [${shape}], but was ` +
+ `[${input.shape}]`);
+ }
+ if (node.attrParams['dtype'] && node.attrParams['dtype'].value) {
+ dist["util"].assert(input.dtype === node.attrParams['dtype'].value, () => `The dtype of dict['${node.name}'] provided in ` +
+ `model.execute(dict) must be ` +
+ `${node.attrParams['dtype'].value}, but was ${input.dtype}`);
+ }
+ });
+ }
+ mapInputs(inputs) {
+ const result = {};
+ for (const inputName in inputs) {
+ if (this._signature != null && this._signature.inputs != null &&
+ this._signature.inputs[inputName] != null) {
+ const tensor = this._signature.inputs[inputName];
+ result[tensor.name] = inputs[inputName];
+ }
+ else {
+ result[inputName] = inputs[inputName];
+ }
+ }
+ return result;
+ }
+ checkInputs(inputs) {
+ const notInGraph = Object.keys(inputs).filter(name => {
+ const [nodeName] = Object(utils["e" /* parseNodeName */])(name);
+ return this.graph.nodes[nodeName] == null;
+ });
+ if (notInGraph.length > 0) {
+ throw new Error(`The dict provided in model.execute(dict) has ` +
+ `keys: [${notInGraph}] that are not part of the graph`);
+ }
+ }
+ mapOutputs(outputs) {
+ return outputs.map(name => {
+ if (this._signature != null && this._signature.outputs != null &&
+ this._signature.outputs[name] != null) {
+ const tensor = this._signature.outputs[name];
+ return tensor.name;
+ }
+ return name;
+ }, {});
+ }
+ checkOutputs(outputs) {
+ outputs.forEach(name => {
+ const [normalizedName] = Object(utils["e" /* parseNodeName */])(name);
+ if (!this.graph.nodes[normalizedName]) {
+ throw new Error(`The output '${name}' is not found in the graph`);
+ }
+ });
+ }
+}
+//# sourceMappingURL=graph_executor.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/executor/graph_model.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+
+const TFHUB_SEARCH_PARAM = '?tfjs-format=file';
+const DEFAULT_MODEL_NAME = 'model.json';
+/**
+ * A `tf.GraphModel` is a directed, acyclic graph built from a
+ * SavedModel GraphDef and allows inference execution.
+ *
+ * A `tf.GraphModel` can only be created by loading a model converted from a
+ * [TensorFlow SavedModel](https://www.tensorflow.org/guide/saved_model) using
+ * the command line converter tool, and is loaded via `tf.loadGraphModel`.
+ */
+/** @doc {heading: 'Models', subheading: 'Classes'} */
+class graph_model_GraphModel {
+ /**
+ * @param modelUrl url for the model, or an `io.IOHandler`.
+ * @param loadOptions options for loading the model, e.g. request options
+ * that allow sending credentials and custom headers, and an optional
+ * onProgress callback fired periodically before the load is completed.
+ */
+ constructor(modelUrl, loadOptions = {}) {
+ this.modelUrl = modelUrl;
+ this.loadOptions = loadOptions;
+ this.version = 'n/a';
+ if (loadOptions == null) {
+ this.loadOptions = {};
+ }
+ }
+ // Returns the version information for the TensorFlow model GraphDef.
+ get modelVersion() {
+ return this.version;
+ }
+ get inputNodes() {
+ return this.executor.inputNodes;
+ }
+ get outputNodes() {
+ return this.executor.outputNodes;
+ }
+ get inputs() {
+ return this.executor.inputs;
+ }
+ get outputs() {
+ return this.executor.outputs;
+ }
+ get weights() {
+ return this.executor.weightMap;
+ }
+ findIOHandler() {
+ const path = this.modelUrl;
+ if (path.load != null) {
+ // Path is an IO Handler.
+ this.handler = path;
+ }
+ else if (this.loadOptions.requestInit != null) {
+ this.handler = dist["io"].browserHTTPRequest(path, this.loadOptions);
+ }
+ else {
+ const handlers = dist["io"].getLoadHandlers(path, this.loadOptions);
+ if (handlers.length === 0) {
+ // For backward compatibility: if no load handler can be found,
+ // assume it is a relative http path.
+ handlers.push(dist["io"].browserHTTPRequest(path, this.loadOptions));
+ }
+ else if (handlers.length > 1) {
+ throw new Error(`Found more than one (${handlers.length}) load handlers for ` +
+ `URL '${[path]}'`);
+ }
+ this.handler = handlers[0];
+ }
+ }
+ /**
+ * Loads the model and weight files, constructs the in-memory weight map and
+ * compiles the inference graph.
+ */
+ async load() {
+ this.findIOHandler();
+ if (this.handler.load == null) {
+ throw new Error('Cannot proceed with model loading because the IOHandler provided ' +
+ 'does not have the `load` method implemented.');
+ }
+ const artifacts = await this.handler.load();
+ return this.loadSync(artifacts);
+ }
+ /**
+ * Synchronously constructs the in-memory weight map and
+ * compiles the inference graph.
+ */
+ /** @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true} */
+ loadSync(artifacts) {
+ this.artifacts = artifacts;
+ const graph = this.artifacts.modelTopology;
+ let signature = {};
+ if (this.artifacts.userDefinedMetadata != null) {
+ signature = // tslint:disable-next-line:no-any
+ this.artifacts.userDefinedMetadata.signature;
+ }
+ this.version = `${graph.versions.producer}.${graph.versions.minConsumer}`;
+ const weightMap = dist["io"].decodeWeights(this.artifacts.weightData, this.artifacts.weightSpecs);
+ this.executor = new graph_executor_GraphExecutor(operation_mapper["a" /* OperationMapper */].Instance.transformGraph(graph, signature));
+ this.executor.weightMap = this.convertTensorMapToTensorsMap(weightMap);
+ return true;
+ }
+ /**
+ * Save the configuration and/or weights of the GraphModel.
+ *
+ * An `IOHandler` is an object that has a `save` method of the proper
+ * signature defined. The `save` method manages the storing or
+ * transmission of serialized data ("artifacts") that represent the
+ * model's topology and weights onto or via a specific medium, such as
+ * file downloads, local storage, IndexedDB in the web browser and HTTP
+ * requests to a server. TensorFlow.js provides `IOHandler`
+ * implementations for a number of frequently used saving mediums, such as
+ * `tf.io.browserDownloads` and `tf.io.browserLocalStorage`. See `tf.io`
+ * for more details.
+ *
+ * This method also allows you to refer to certain types of `IOHandler`s
+ * as URL-like string shortcuts, such as 'localstorage://' and
+ * 'indexeddb://'.
+ *
+ * Example 1: Save `model`'s topology and weights to browser [local
+ * storage](https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage);
+ * then load it back.
+ *
+ * ```js
+ * const modelUrl =
+ * 'https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json';
+ * const model = await tf.loadGraphModel(modelUrl);
+ * const zeros = tf.zeros([1, 224, 224, 3]);
+ * model.predict(zeros).print();
+ *
+ * const saveResults = await model.save('localstorage://my-model-1');
+ *
+ * const loadedModel = await tf.loadGraphModel('localstorage://my-model-1');
+ * console.log('Prediction from loaded model:');
+ * model.predict(zeros).print();
+ * ```
+ *
+ * @param handlerOrURL An instance of `IOHandler` or a URL-like,
+ * scheme-based string shortcut for `IOHandler`.
+ * @param config Options for saving the model.
+ * @returns A `Promise` of `SaveResult`, which summarizes the result of
+ * the saving, such as byte sizes of the saved artifacts for the model's
+ * topology and weight values.
+ */
+ /**
+ * @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true}
+ */
+ async save(handlerOrURL, config) {
+ if (typeof handlerOrURL === 'string') {
+ const handlers = dist["io"].getSaveHandlers(handlerOrURL);
+ if (handlers.length === 0) {
+ throw new Error(`Cannot find any save handlers for URL '${handlerOrURL}'`);
+ }
+ else if (handlers.length > 1) {
+ throw new Error(`Found more than one (${handlers.length}) save handlers for ` +
+ `URL '${handlerOrURL}'`);
+ }
+ handlerOrURL = handlers[0];
+ }
+ if (handlerOrURL.save == null) {
+ throw new Error('GraphModel.save() cannot proceed because the IOHandler ' +
+ 'provided does not have the `save` attribute defined.');
+ }
+ return handlerOrURL.save(this.artifacts);
+ }
+ /**
+ * Execute the inference for the input tensors.
+ *
+ * @param input The input tensors. When the model has a single input, the
+ * inputs param should be a `tf.Tensor`. For models with multiple inputs,
+ * the inputs param should be a `tf.Tensor`[] if the input order is
+ * fixed, or otherwise a NamedTensorMap.
+ *
+ * For models with multiple inputs, we recommend using a NamedTensorMap as
+ * the input type. If you use `tf.Tensor`[], the order of the array needs to
+ * follow the order of the inputNodes array. @see {@link GraphModel.inputNodes}
+ *
+ * You can also feed any intermediate nodes using the NamedTensorMap as the
+ * input type. For example, given the graph
+ * InputNode => Intermediate => OutputNode,
+ * you can execute the subgraph Intermediate => OutputNode by calling
+ * model.execute({'IntermediateNode': tf.tensor(...)});
+ *
+ * This is useful for models that use tf.dynamic_rnn, where the intermediate
+ * state needs to be fed manually.
+ *
+ * For batch inference execution, the tensors for each input need to be
+ * concatenated together. For example with MobileNet, the required input shape
+ * is [1, 224, 224, 3], which represents [batch, height, width, channel].
+ * If we provide batched data of 100 images, the input tensor should have
+ * the shape [100, 224, 224, 3].
+ *
+ * @param config Prediction configuration for specifying the batch size and
+ * output node names. Currently the batch size option is ignored for graph
+ * model.
+ *
+ * @returns Inference result tensors. The output is a single `tf.Tensor`
+ * if the model has a single output node, otherwise a Tensor[] or
+ * NamedTensorMap[] is returned for models with multiple outputs.
+ */
+ /** @doc {heading: 'Models', subheading: 'Classes'} */
+ predict(inputs, config) {
+ return this.execute(inputs, this.outputNodes);
+ }
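+ // Example (editor's sketch; `model` is a loaded GraphModel and the input
+ // names are hypothetical):
+ //   model.predict(tf.zeros([1, 224, 224, 3])).print();      // single input
+ //   model.predict({image: img, threshold: tf.scalar(0.5)}); // named inputs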
+ normalizeInputs(inputs) {
+ if (!(inputs instanceof dist["Tensor"]) && !Array.isArray(inputs)) {
+ // The input is already a NamedTensorMap.
+ return inputs;
+ }
+ inputs = Array.isArray(inputs) ? inputs : [inputs];
+ if (inputs.length !== this.inputNodes.length) {
+ throw new Error('Input tensor count mismatch, ' +
+ `the graph model has ${this.inputNodes.length} placeholders, ` +
+ `while there are ${inputs.length} input tensors.`);
+ }
+ return this.inputNodes.reduce((map, inputName, i) => {
+ map[inputName] = inputs[i];
+ return map;
+ }, {});
+ }
+ normalizeOutputs(outputs) {
+ outputs = outputs || this.outputNodes;
+ return !Array.isArray(outputs) ? [outputs] : outputs;
+ }
+ /**
+ * Executes inference for the model for given input tensors.
+ * @param inputs tensor, tensor array or tensor map of the inputs for the
+ * model, keyed by the input node names.
+ * @param outputs Output node names from the TensorFlow model. If no
+ * outputs are specified, the default outputs of the model are used.
+ * You can inspect intermediate nodes of the model by adding them to the
+ * outputs array.
+ *
+ * @returns A single tensor if a single output is requested, or if no
+ * outputs are provided and there is only one default output; otherwise a
+ * tensor array. The order of the tensor array is the same as the outputs
+ * if provided, otherwise the order of the outputNodes attribute of the model.
+ */
+ /** @doc {heading: 'Models', subheading: 'Classes'} */
+ execute(inputs, outputs) {
+ inputs = this.normalizeInputs(inputs);
+ outputs = this.normalizeOutputs(outputs);
+ const result = this.executor.execute(inputs, outputs);
+ return result.length > 1 ? result : result[0];
+ }
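+ // Example (editor's sketch; node names are hypothetical): executing the
+ // subgraph below an intermediate node, as described in the predict() docs:
+ //   const out = model.execute({'Intermediate': tf.zeros([1, 10])}, 'Output');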
+ /**
+ * Executes inference for the model for the given input tensors in an async
+ * fashion; use this method when your model contains control flow ops.
+ * @param inputs tensor, tensor array or tensor map of the inputs for the
+ * model, keyed by the input node names.
+ * @param outputs Output node names from the TensorFlow model. If no outputs
+ * are specified, the default outputs of the model are used. You can
+ * inspect intermediate nodes of the model by adding them to the outputs
+ * array.
+ *
+ * @returns A Promise of a single tensor if a single output is requested, or
+ * if no outputs are provided and there is only one default output; otherwise
+ * a tensor array.
+ */
+ /** @doc {heading: 'Models', subheading: 'Classes'} */
+ async executeAsync(inputs, outputs) {
+ inputs = this.normalizeInputs(inputs);
+ outputs = this.normalizeOutputs(outputs);
+ const result = await this.executor.executeAsync(inputs, outputs);
+ return result.length > 1 ? result : result[0];
+ }
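+ // Example (editor's sketch): for graphs with control flow ops,
+ //   const result = await model.executeAsync(tf.zeros([1, 224, 224, 3]));
+ //   Array.isArray(result) ? result.forEach(t => t.print()) : result.print();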
+ convertTensorMapToTensorsMap(map) {
+ return Object.keys(map).reduce((newMap, key) => {
+ newMap[key] = [map[key]];
+ return newMap;
+ }, {});
+ }
+ /**
+ * Releases the memory used by the weight tensors.
+ */
+ /** @doc {heading: 'Models', subheading: 'Classes'} */
+ dispose() {
+ this.executor.dispose();
+ }
+}
+/**
+ * Load a graph model given a URL to the model definition.
+ *
+ * Example of loading MobileNetV2 from a URL and making a prediction with a
+ * zeros input:
+ *
+ * ```js
+ * const modelUrl =
+ * 'https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json';
+ * const model = await tf.loadGraphModel(modelUrl);
+ * const zeros = tf.zeros([1, 224, 224, 3]);
+ * model.predict(zeros).print();
+ * ```
+ *
+ * Example of loading MobileNetV2 from a TF Hub URL and making a prediction with
+ * a zeros input:
+ *
+ * ```js
+ * const modelUrl =
+ * 'https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/classification/2';
+ * const model = await tf.loadGraphModel(modelUrl, {fromTFHub: true});
+ * const zeros = tf.zeros([1, 224, 224, 3]);
+ * model.predict(zeros).print();
+ * ```
+ * @param modelUrl The url or an `io.IOHandler` that loads the model.
+ * @param options Options for the HTTP request, which allow sending
+ * credentials and custom headers.
+ */
+/** @doc {heading: 'Models', subheading: 'Loading'} */
+async function loadGraphModel(modelUrl, options = {}) {
+ if (modelUrl == null) {
+ throw new Error('modelUrl in loadGraphModel() cannot be null. Please provide a url ' +
+ 'or an IOHandler that loads the model');
+ }
+ if (options == null) {
+ options = {};
+ }
+ if (options.fromTFHub) {
+ if (modelUrl.load == null) {
+ if (!modelUrl.endsWith('/')) {
+ modelUrl = modelUrl + '/';
+ }
+ modelUrl = `${modelUrl}${DEFAULT_MODEL_NAME}${TFHUB_SEARCH_PARAM}`;
+ }
+ }
+ const model = new graph_model_GraphModel(modelUrl, options);
+ await model.load();
+ return model;
+}
+//# sourceMappingURL=graph_model.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/version.js
+/** @license See the LICENSE file. */
+// This code is auto-generated, do not modify this file!
+const version = '2.0.1';
+
+//# sourceMappingURL=version.js.map
+// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-converter/dist/index.js
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+
+//# sourceMappingURL=index.js.map
+
+/***/ }),
+/* 39 */
+/***/ (function(module, exports, __webpack_require__) {
+
+"use strict";
+/* WEBPACK VAR INJECTION */(function(global) {/*!
+ * The buffer module from node.js, for the browser.
+ *
+ * @author Feross Aboukhadijeh <http://feross.org> <feross@feross.org>
+ * @license MIT
+ */
+/* eslint-disable no-proto */
+
+
+
+var base64 = __webpack_require__(65)
+var ieee754 = __webpack_require__(66)
+var isArray = __webpack_require__(67)
+
+exports.Buffer = Buffer
+exports.SlowBuffer = SlowBuffer
+exports.INSPECT_MAX_BYTES = 50
+
+/**
+ * If `Buffer.TYPED_ARRAY_SUPPORT`:
+ * === true Use Uint8Array implementation (fastest)
+ * === false Use Object implementation (most compatible, even IE6)
+ *
+ * Browsers that support typed arrays are IE 10+, Firefox 4+, Chrome 7+, Safari 5.1+,
+ * Opera 11.6+, iOS 4.2+.
+ *
+ * Due to various browser bugs, sometimes the Object implementation will be used even
+ * when the browser supports typed arrays.
+ *
+ * Note:
+ *
+ * - Firefox 4-29 lacks support for adding new properties to `Uint8Array` instances,
+ * See: https://bugzilla.mozilla.org/show_bug.cgi?id=695438.
+ *
+ * - Chrome 9-10 is missing the `TypedArray.prototype.subarray` function.
+ *
+ * - IE10 has a broken `TypedArray.prototype.subarray` function which returns arrays of
+ * incorrect length in some situations.
+ *
+ * We detect these buggy browsers and set `Buffer.TYPED_ARRAY_SUPPORT` to `false` so they
+ * get the Object implementation, which is slower but behaves correctly.
+ */
+Buffer.TYPED_ARRAY_SUPPORT = global.TYPED_ARRAY_SUPPORT !== undefined
+ ? global.TYPED_ARRAY_SUPPORT
+ : typedArraySupport()
+
+/*
+ * Export kMaxLength after typed array support is determined.
+ */
+exports.kMaxLength = kMaxLength()
+
+function typedArraySupport () {
+ try {
+ var arr = new Uint8Array(1)
+ arr.__proto__ = {__proto__: Uint8Array.prototype, foo: function () { return 42 }}
+ return arr.foo() === 42 && // typed array instances can be augmented
+ typeof arr.subarray === 'function' && // chrome 9-10 lack `subarray`
+ arr.subarray(1, 1).byteLength === 0 // ie10 has broken `subarray`
+ } catch (e) {
+ return false
+ }
+}
+
+function kMaxLength () {
+ return Buffer.TYPED_ARRAY_SUPPORT
+ ? 0x7fffffff
+ : 0x3fffffff
+}
+
+function createBuffer (that, length) {
+ if (kMaxLength() < length) {
+ throw new RangeError('Invalid typed array length')
+ }
+ if (Buffer.TYPED_ARRAY_SUPPORT) {
+ // Return an augmented `Uint8Array` instance, for best performance
+ that = new Uint8Array(length)
+ that.__proto__ = Buffer.prototype
+ } else {
+ // Fallback: Return an object instance of the Buffer class
+ if (that === null) {
+ that = new Buffer(length)
+ }
+ that.length = length
+ }
+
+ return that
+}
+
+/**
+ * The Buffer constructor returns instances of `Uint8Array` that have their
+ * prototype changed to `Buffer.prototype`. Furthermore, `Buffer` is a subclass of
+ * `Uint8Array`, so the returned instances will have all the node `Buffer` methods
+ * and the `Uint8Array` methods. Square bracket notation works as expected -- it
+ * returns a single octet.
+ *
+ * The `Uint8Array` prototype remains unmodified.
+ */
+
+function Buffer (arg, encodingOrOffset, length) {
+ if (!Buffer.TYPED_ARRAY_SUPPORT && !(this instanceof Buffer)) {
+ return new Buffer(arg, encodingOrOffset, length)
+ }
+
+ // Common case.
+ if (typeof arg === 'number') {
+ if (typeof encodingOrOffset === 'string') {
+ throw new Error(
+ 'If encoding is specified then the first argument must be a string'
+ )
+ }
+ return allocUnsafe(this, arg)
+ }
+ return from(this, arg, encodingOrOffset, length)
+}
+
+Buffer.poolSize = 8192 // not used by this implementation
+
+// TODO: Legacy, not needed anymore. Remove in next major version.
+Buffer._augment = function (arr) {
+ arr.__proto__ = Buffer.prototype
+ return arr
+}
+
+function from (that, value, encodingOrOffset, length) {
+ if (typeof value === 'number') {
+ throw new TypeError('"value" argument must not be a number')
+ }
+
+ if (typeof ArrayBuffer !== 'undefined' && value instanceof ArrayBuffer) {
+ return fromArrayBuffer(that, value, encodingOrOffset, length)
+ }
+
+ if (typeof value === 'string') {
+ return fromString(that, value, encodingOrOffset)
+ }
+
+ return fromObject(that, value)
+}
+
+/**
+ * Functionally equivalent to Buffer(arg, encoding) but throws a TypeError
+ * if value is a number.
+ * Buffer.from(str[, encoding])
+ * Buffer.from(array)
+ * Buffer.from(buffer)
+ * Buffer.from(arrayBuffer[, byteOffset[, length]])
+ **/
+Buffer.from = function (value, encodingOrOffset, length) {
+ return from(null, value, encodingOrOffset, length)
+}
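+// Editor's examples of the overloads documented above:
+//   Buffer.from('abc')              // <Buffer 61 62 63>
+//   Buffer.from([1, 2, 3])          // <Buffer 01 02 03>
+//   Buffer.from(new ArrayBuffer(2)) // <Buffer 00 00>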
+
+if (Buffer.TYPED_ARRAY_SUPPORT) {
+ Buffer.prototype.__proto__ = Uint8Array.prototype
+ Buffer.__proto__ = Uint8Array
+ if (typeof Symbol !== 'undefined' && Symbol.species &&
+ Buffer[Symbol.species] === Buffer) {
+ // Fix subarray() in ES2016. See: https://github.com/feross/buffer/pull/97
+ Object.defineProperty(Buffer, Symbol.species, {
+ value: null,
+ configurable: true
+ })
+ }
+}
+
+function assertSize (size) {
+ if (typeof size !== 'number') {
+ throw new TypeError('"size" argument must be a number')
+ } else if (size < 0) {
+ throw new RangeError('"size" argument must not be negative')
+ }
+}
+
+function alloc (that, size, fill, encoding) {
+ assertSize(size)
+ if (size <= 0) {
+ return createBuffer(that, size)
+ }
+ if (fill !== undefined) {
+ // Only pay attention to encoding if it's a string. This
+ // prevents accidentally sending in a number that would
+ // be interpreted as a start offset.
+ return typeof encoding === 'string'
+ ? createBuffer(that, size).fill(fill, encoding)
+ : createBuffer(that, size).fill(fill)
+ }
+ return createBuffer(that, size)
+}
+
+/**
+ * Creates a new filled Buffer instance.
+ * alloc(size[, fill[, encoding]])
+ **/
+Buffer.alloc = function (size, fill, encoding) {
+ return alloc(null, size, fill, encoding)
+}
+
+function allocUnsafe (that, size) {
+ assertSize(size)
+ that = createBuffer(that, size < 0 ? 0 : checked(size) | 0)
+ if (!Buffer.TYPED_ARRAY_SUPPORT) {
+ for (var i = 0; i < size; ++i) {
+ that[i] = 0
+ }
+ }
+ return that
+}
+
+/**
+ * Equivalent to Buffer(num), by default creates a non-zero-filled Buffer instance.
+ */
+Buffer.allocUnsafe = function (size) {
+ return allocUnsafe(null, size)
+}
+/**
+ * Equivalent to SlowBuffer(num), by default creates a non-zero-filled Buffer instance.
+ */
+Buffer.allocUnsafeSlow = function (size) {
+ return allocUnsafe(null, size)
+}
+
+function fromString (that, string, encoding) {
+ if (typeof encoding !== 'string' || encoding === '') {
+ encoding = 'utf8'
+ }
+
+ if (!Buffer.isEncoding(encoding)) {
+ throw new TypeError('"encoding" must be a valid string encoding')
+ }
+
+ var length = byteLength(string, encoding) | 0
+ that = createBuffer(that, length)
+
+ var actual = that.write(string, encoding)
+
+ if (actual !== length) {
+ // Writing a hex string, for example, that contains invalid characters will
+ // cause everything after the first invalid character to be ignored. (e.g.
+ // 'abxxcd' will be treated as 'ab')
+ that = that.slice(0, actual)
+ }
+
+ return that
+}
+
+function fromArrayLike (that, array) {
+ var length = array.length < 0 ? 0 : checked(array.length) | 0
+ that = createBuffer(that, length)
+ for (var i = 0; i < length; i += 1) {
+ that[i] = array[i] & 255
+ }
+ return that
+}
+
+function fromArrayBuffer (that, array, byteOffset, length) {
+ array.byteLength // this throws if `array` is not a valid ArrayBuffer
+
+ if (byteOffset < 0 || array.byteLength < byteOffset) {
+ throw new RangeError('\'offset\' is out of bounds')
+ }
+
+ if (array.byteLength < byteOffset + (length || 0)) {
+ throw new RangeError('\'length\' is out of bounds')
+ }
+
+ if (byteOffset === undefined && length === undefined) {
+ array = new Uint8Array(array)
+ } else if (length === undefined) {
+ array = new Uint8Array(array, byteOffset)
+ } else {
+ array = new Uint8Array(array, byteOffset, length)
+ }
+
+ if (Buffer.TYPED_ARRAY_SUPPORT) {
+ // Return an augmented `Uint8Array` instance, for best performance
+ that = array
+ that.__proto__ = Buffer.prototype
+ } else {
+ // Fallback: Return an object instance of the Buffer class
+ that = fromArrayLike(that, array)
+ }
+ return that
+}
+
+function fromObject (that, obj) {
+ if (Buffer.isBuffer(obj)) {
+ var len = checked(obj.length) | 0
+ that = createBuffer(that, len)
+
+ if (that.length === 0) {
+ return that
+ }
+
+ obj.copy(that, 0, 0, len)
+ return that
+ }
+
+ if (obj) {
+ if ((typeof ArrayBuffer !== 'undefined' &&
+ obj.buffer instanceof ArrayBuffer) || 'length' in obj) {
+ if (typeof obj.length !== 'number' || isnan(obj.length)) {
+ return createBuffer(that, 0)
+ }
+ return fromArrayLike(that, obj)
+ }
+
+ if (obj.type === 'Buffer' && isArray(obj.data)) {
+ return fromArrayLike(that, obj.data)
+ }
+ }
+
+ throw new TypeError('First argument must be a string, Buffer, ArrayBuffer, Array, or array-like object.')
+}
+
+function checked (length) {
+ // Note: cannot use `length < kMaxLength()` here because that fails when
+ // length is NaN (which is otherwise coerced to zero.)
+ if (length >= kMaxLength()) {
+ throw new RangeError('Attempt to allocate Buffer larger than maximum ' +
+ 'size: 0x' + kMaxLength().toString(16) + ' bytes')
+ }
+ return length | 0
+}
+
+function SlowBuffer (length) {
+ if (+length != length) { // eslint-disable-line eqeqeq
+ length = 0
+ }
+ return Buffer.alloc(+length)
+}
+
+Buffer.isBuffer = function isBuffer (b) {
+ return !!(b != null && b._isBuffer)
+}
+
+Buffer.compare = function compare (a, b) {
+ if (!Buffer.isBuffer(a) || !Buffer.isBuffer(b)) {
+ throw new TypeError('Arguments must be Buffers')
+ }
+
+ if (a === b) return 0
+
+ var x = a.length
+ var y = b.length
+
+ for (var i = 0, len = Math.min(x, y); i < len; ++i) {
+ if (a[i] !== b[i]) {
+ x = a[i]
+ y = b[i]
+ break
+ }
+ }
+
+ if (x < y) return -1
+ if (y < x) return 1
+ return 0
+}
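+// Editor's example: the static compare() is suitable as a sort comparator.
+//   [Buffer.from('b'), Buffer.from('a')].sort(Buffer.compare)
+//   // => [<Buffer 61>, <Buffer 62>]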
+
+Buffer.isEncoding = function isEncoding (encoding) {
+ switch (String(encoding).toLowerCase()) {
+ case 'hex':
+ case 'utf8':
+ case 'utf-8':
+ case 'ascii':
+ case 'latin1':
+ case 'binary':
+ case 'base64':
+ case 'ucs2':
+ case 'ucs-2':
+ case 'utf16le':
+ case 'utf-16le':
+ return true
+ default:
+ return false
+ }
+}
+
+Buffer.concat = function concat (list, length) {
+ if (!isArray(list)) {
+ throw new TypeError('"list" argument must be an Array of Buffers')
+ }
+
+ if (list.length === 0) {
+ return Buffer.alloc(0)
+ }
+
+ var i
+ if (length === undefined) {
+ length = 0
+ for (i = 0; i < list.length; ++i) {
+ length += list[i].length
+ }
+ }
+
+ var buffer = Buffer.allocUnsafe(length)
+ var pos = 0
+ for (i = 0; i < list.length; ++i) {
+ var buf = list[i]
+ if (!Buffer.isBuffer(buf)) {
+ throw new TypeError('"list" argument must be an Array of Buffers')
+ }
+ buf.copy(buffer, pos)
+ pos += buf.length
+ }
+ return buffer
+}
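+// Editor's example: passing the total length up front skips the summing pass.
+//   Buffer.concat([Buffer.from('ab'), Buffer.from('cd')], 4).toString()
+//   // => 'abcd'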
+
+function byteLength (string, encoding) {
+ if (Buffer.isBuffer(string)) {
+ return string.length
+ }
+ if (typeof ArrayBuffer !== 'undefined' && typeof ArrayBuffer.isView === 'function' &&
+ (ArrayBuffer.isView(string) || string instanceof ArrayBuffer)) {
+ return string.byteLength
+ }
+ if (typeof string !== 'string') {
+ string = '' + string
+ }
+
+ var len = string.length
+ if (len === 0) return 0
+
+ // Use a for loop to avoid recursion
+ var loweredCase = false
+ for (;;) {
+ switch (encoding) {
+ case 'ascii':
+ case 'latin1':
+ case 'binary':
+ return len
+ case 'utf8':
+ case 'utf-8':
+ case undefined:
+ return utf8ToBytes(string).length
+ case 'ucs2':
+ case 'ucs-2':
+ case 'utf16le':
+ case 'utf-16le':
+ return len * 2
+ case 'hex':
+ return len >>> 1
+ case 'base64':
+ return base64ToBytes(string).length
+ default:
+ if (loweredCase) return utf8ToBytes(string).length // assume utf8
+ encoding = ('' + encoding).toLowerCase()
+ loweredCase = true
+ }
+ }
+}
+Buffer.byteLength = byteLength
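+// Editor's example: byte length and string length differ for multi-byte
+// encodings:
+//   Buffer.byteLength('½ + ¼', 'utf8') // => 7, while the string length is 5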
+
+function slowToString (encoding, start, end) {
+ var loweredCase = false
+
+ // No need to verify that "this.length <= MAX_UINT32" since it's a read-only
+ // property of a typed array.
+
+ // This behaves neither like String nor Uint8Array in that we set start/end
+ // to their upper/lower bounds if the value passed is out of range.
+ // undefined is handled specially as per ECMA-262 6th Edition,
+ // Section 13.3.3.7 Runtime Semantics: KeyedBindingInitialization.
+ if (start === undefined || start < 0) {
+ start = 0
+ }
+ // Return early if start > this.length. Done here to prevent potential uint32
+ // coercion fail below.
+ if (start > this.length) {
+ return ''
+ }
+
+ if (end === undefined || end > this.length) {
+ end = this.length
+ }
+
+ if (end <= 0) {
+ return ''
+ }
+
+ // Force coercion to uint32. This will also coerce falsey/NaN values to 0.
+ end >>>= 0
+ start >>>= 0
+
+ if (end <= start) {
+ return ''
+ }
+
+ if (!encoding) encoding = 'utf8'
+
+ while (true) {
+ switch (encoding) {
+ case 'hex':
+ return hexSlice(this, start, end)
+
+ case 'utf8':
+ case 'utf-8':
+ return utf8Slice(this, start, end)
+
+ case 'ascii':
+ return asciiSlice(this, start, end)
+
+ case 'latin1':
+ case 'binary':
+ return latin1Slice(this, start, end)
+
+ case 'base64':
+ return base64Slice(this, start, end)
+
+ case 'ucs2':
+ case 'ucs-2':
+ case 'utf16le':
+ case 'utf-16le':
+ return utf16leSlice(this, start, end)
+
+ default:
+ if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)
+ encoding = (encoding + '').toLowerCase()
+ loweredCase = true
+ }
+ }
+}
+
+// The property is used by `Buffer.isBuffer` and `is-buffer` (in Safari 5-7) to detect
+// Buffer instances.
+Buffer.prototype._isBuffer = true
+
+function swap (b, n, m) {
+ var i = b[n]
+ b[n] = b[m]
+ b[m] = i
+}
+
+Buffer.prototype.swap16 = function swap16 () {
+ var len = this.length
+ if (len % 2 !== 0) {
+ throw new RangeError('Buffer size must be a multiple of 16-bits')
+ }
+ for (var i = 0; i < len; i += 2) {
+ swap(this, i, i + 1)
+ }
+ return this
+}
+
+Buffer.prototype.swap32 = function swap32 () {
+ var len = this.length
+ if (len % 4 !== 0) {
+ throw new RangeError('Buffer size must be a multiple of 32-bits')
+ }
+ for (var i = 0; i < len; i += 4) {
+ swap(this, i, i + 3)
+ swap(this, i + 1, i + 2)
+ }
+ return this
+}
+
+Buffer.prototype.swap64 = function swap64 () {
+ var len = this.length
+ if (len % 8 !== 0) {
+ throw new RangeError('Buffer size must be a multiple of 64-bits')
+ }
+ for (var i = 0; i < len; i += 8) {
+ swap(this, i, i + 7)
+ swap(this, i + 1, i + 6)
+ swap(this, i + 2, i + 5)
+ swap(this, i + 3, i + 4)
+ }
+ return this
+}
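+// Editor's examples of the in-place endianness swaps above:
+//   Buffer.from([0x1, 0x2, 0x3, 0x4]).swap16() // <Buffer 02 01 04 03>
+//   Buffer.from([0x1, 0x2, 0x3, 0x4]).swap32() // <Buffer 04 03 02 01>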
+
+Buffer.prototype.toString = function toString () {
+ var length = this.length | 0
+ if (length === 0) return ''
+ if (arguments.length === 0) return utf8Slice(this, 0, length)
+ return slowToString.apply(this, arguments)
+}
+
+Buffer.prototype.equals = function equals (b) {
+ if (!Buffer.isBuffer(b)) throw new TypeError('Argument must be a Buffer')
+ if (this === b) return true
+ return Buffer.compare(this, b) === 0
+}
+
+Buffer.prototype.inspect = function inspect () {
+ var str = ''
+ var max = exports.INSPECT_MAX_BYTES
+ if (this.length > 0) {
+ str = this.toString('hex', 0, max).match(/.{2}/g).join(' ')
+ if (this.length > max) str += ' ... '
+ }
+ return '<Buffer ' + str + '>'
+}
+
+Buffer.prototype.compare = function compare (target, start, end, thisStart, thisEnd) {
+ if (!Buffer.isBuffer(target)) {
+ throw new TypeError('Argument must be a Buffer')
+ }
+
+ if (start === undefined) {
+ start = 0
+ }
+ if (end === undefined) {
+ end = target ? target.length : 0
+ }
+ if (thisStart === undefined) {
+ thisStart = 0
+ }
+ if (thisEnd === undefined) {
+ thisEnd = this.length
+ }
+
+ if (start < 0 || end > target.length || thisStart < 0 || thisEnd > this.length) {
+ throw new RangeError('out of range index')
+ }
+
+ if (thisStart >= thisEnd && start >= end) {
+ return 0
+ }
+ if (thisStart >= thisEnd) {
+ return -1
+ }
+ if (start >= end) {
+ return 1
+ }
+
+ start >>>= 0
+ end >>>= 0
+ thisStart >>>= 0
+ thisEnd >>>= 0
+
+ if (this === target) return 0
+
+ var x = thisEnd - thisStart
+ var y = end - start
+ var len = Math.min(x, y)
+
+ var thisCopy = this.slice(thisStart, thisEnd)
+ var targetCopy = target.slice(start, end)
+
+ for (var i = 0; i < len; ++i) {
+ if (thisCopy[i] !== targetCopy[i]) {
+ x = thisCopy[i]
+ y = targetCopy[i]
+ break
+ }
+ }
+
+ if (x < y) return -1
+ if (y < x) return 1
+ return 0
+}
+
+// Finds either the first index of `val` in `buffer` at offset >= `byteOffset`,
+// OR the last index of `val` in `buffer` at offset <= `byteOffset`.
+//
+// Arguments:
+// - buffer - a Buffer to search
+// - val - a string, Buffer, or number
+// - byteOffset - an index into `buffer`; will be clamped to an int32
+// - encoding - an optional encoding, relevant if val is a string
+// - dir - true for indexOf, false for lastIndexOf
+function bidirectionalIndexOf (buffer, val, byteOffset, encoding, dir) {
+ // Empty buffer means no match
+ if (buffer.length === 0) return -1
+
+ // Normalize byteOffset
+ if (typeof byteOffset === 'string') {
+ encoding = byteOffset
+ byteOffset = 0
+ } else if (byteOffset > 0x7fffffff) {
+ byteOffset = 0x7fffffff
+ } else if (byteOffset < -0x80000000) {
+ byteOffset = -0x80000000
+ }
+ byteOffset = +byteOffset // Coerce to Number.
+ if (isNaN(byteOffset)) {
+ // byteOffset: if it's undefined, null, NaN, "foo", etc, search the whole buffer
+ byteOffset = dir ? 0 : (buffer.length - 1)
+ }
+
+ // Normalize byteOffset: negative offsets start from the end of the buffer
+ if (byteOffset < 0) byteOffset = buffer.length + byteOffset
+ if (byteOffset >= buffer.length) {
+ if (dir) return -1
+ else byteOffset = buffer.length - 1
+ } else if (byteOffset < 0) {
+ if (dir) byteOffset = 0
+ else return -1
+ }
+
+ // Normalize val
+ if (typeof val === 'string') {
+ val = Buffer.from(val, encoding)
+ }
+
+ // Finally, search either indexOf (if dir is true) or lastIndexOf
+ if (Buffer.isBuffer(val)) {
+ // Special case: looking for empty string/buffer always fails
+ if (val.length === 0) {
+ return -1
+ }
+ return arrayIndexOf(buffer, val, byteOffset, encoding, dir)
+ } else if (typeof val === 'number') {
+ val = val & 0xFF // Search for a byte value [0-255]
+ if (Buffer.TYPED_ARRAY_SUPPORT &&
+ typeof Uint8Array.prototype.indexOf === 'function') {
+ if (dir) {
+ return Uint8Array.prototype.indexOf.call(buffer, val, byteOffset)
+ } else {
+ return Uint8Array.prototype.lastIndexOf.call(buffer, val, byteOffset)
+ }
+ }
+ return arrayIndexOf(buffer, [ val ], byteOffset, encoding, dir)
+ }
+
+ throw new TypeError('val must be string, number or Buffer')
+}
+
+function arrayIndexOf (arr, val, byteOffset, encoding, dir) {
+ var indexSize = 1
+ var arrLength = arr.length
+ var valLength = val.length
+
+ if (encoding !== undefined) {
+ encoding = String(encoding).toLowerCase()
+ if (encoding === 'ucs2' || encoding === 'ucs-2' ||
+ encoding === 'utf16le' || encoding === 'utf-16le') {
+ if (arr.length < 2 || val.length < 2) {
+ return -1
+ }
+ indexSize = 2
+ arrLength /= 2
+ valLength /= 2
+ byteOffset /= 2
+ }
+ }
+
+ function read (buf, i) {
+ if (indexSize === 1) {
+ return buf[i]
+ } else {
+ return buf.readUInt16BE(i * indexSize)
+ }
+ }
+
+ var i
+ if (dir) {
+ var foundIndex = -1
+ for (i = byteOffset; i < arrLength; i++) {
+ if (read(arr, i) === read(val, foundIndex === -1 ? 0 : i - foundIndex)) {
+ if (foundIndex === -1) foundIndex = i
+ if (i - foundIndex + 1 === valLength) return foundIndex * indexSize
+ } else {
+ if (foundIndex !== -1) i -= i - foundIndex
+ foundIndex = -1
+ }
+ }
+ } else {
+ if (byteOffset + valLength > arrLength) byteOffset = arrLength - valLength
+ for (i = byteOffset; i >= 0; i--) {
+ var found = true
+ for (var j = 0; j < valLength; j++) {
+ if (read(arr, i + j) !== read(val, j)) {
+ found = false
+ break
+ }
+ }
+ if (found) return i
+ }
+ }
+
+ return -1
+}
+
+Buffer.prototype.includes = function includes (val, byteOffset, encoding) {
+ return this.indexOf(val, byteOffset, encoding) !== -1
+}
+
+Buffer.prototype.indexOf = function indexOf (val, byteOffset, encoding) {
+ return bidirectionalIndexOf(this, val, byteOffset, encoding, true)
+}
+
+Buffer.prototype.lastIndexOf = function lastIndexOf (val, byteOffset, encoding) {
+ return bidirectionalIndexOf(this, val, byteOffset, encoding, false)
+}
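+// Editor's examples for the search methods above:
+//   Buffer.from('abcabc').indexOf('b')     // => 1
+//   Buffer.from('abcabc').lastIndexOf('b') // => 4
+//   Buffer.from('abcabc').includes(0x63)   // => true (0x63 is 'c')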
+
+function hexWrite (buf, string, offset, length) {
+ offset = Number(offset) || 0
+ var remaining = buf.length - offset
+ if (!length) {
+ length = remaining
+ } else {
+ length = Number(length)
+ if (length > remaining) {
+ length = remaining
+ }
+ }
+
+ // must be an even number of digits
+ var strLen = string.length
+ if (strLen % 2 !== 0) throw new TypeError('Invalid hex string')
+
+ if (length > strLen / 2) {
+ length = strLen / 2
+ }
+ for (var i = 0; i < length; ++i) {
+ var parsed = parseInt(string.substr(i * 2, 2), 16)
+ if (isNaN(parsed)) return i
+ buf[offset + i] = parsed
+ }
+ return i
+}
+
+function utf8Write (buf, string, offset, length) {
+ return blitBuffer(utf8ToBytes(string, buf.length - offset), buf, offset, length)
+}
+
+function asciiWrite (buf, string, offset, length) {
+ return blitBuffer(asciiToBytes(string), buf, offset, length)
+}
+
+function latin1Write (buf, string, offset, length) {
+ return asciiWrite(buf, string, offset, length)
+}
+
+function base64Write (buf, string, offset, length) {
+ return blitBuffer(base64ToBytes(string), buf, offset, length)
+}
+
+function ucs2Write (buf, string, offset, length) {
+ return blitBuffer(utf16leToBytes(string, buf.length - offset), buf, offset, length)
+}
+
+Buffer.prototype.write = function write (string, offset, length, encoding) {
+ // Buffer#write(string)
+ if (offset === undefined) {
+ encoding = 'utf8'
+ length = this.length
+ offset = 0
+ // Buffer#write(string, encoding)
+ } else if (length === undefined && typeof offset === 'string') {
+ encoding = offset
+ length = this.length
+ offset = 0
+ // Buffer#write(string, offset[, length][, encoding])
+ } else if (isFinite(offset)) {
+ offset = offset | 0
+ if (isFinite(length)) {
+ length = length | 0
+ if (encoding === undefined) encoding = 'utf8'
+ } else {
+ encoding = length
+ length = undefined
+ }
+ // legacy write(string, encoding, offset, length) - remove in v0.13
+ } else {
+ throw new Error(
+ 'Buffer.write(string, encoding, offset[, length]) is no longer supported'
+ )
+ }
+
+ var remaining = this.length - offset
+ if (length === undefined || length > remaining) length = remaining
+
+ if ((string.length > 0 && (length < 0 || offset < 0)) || offset > this.length) {
+ throw new RangeError('Attempt to write outside buffer bounds')
+ }
+
+ if (!encoding) encoding = 'utf8'
+
+ var loweredCase = false
+ for (;;) {
+ switch (encoding) {
+ case 'hex':
+ return hexWrite(this, string, offset, length)
+
+ case 'utf8':
+ case 'utf-8':
+ return utf8Write(this, string, offset, length)
+
+ case 'ascii':
+ return asciiWrite(this, string, offset, length)
+
+ case 'latin1':
+ case 'binary':
+ return latin1Write(this, string, offset, length)
+
+ case 'base64':
+ // Warning: maxLength not taken into account in base64Write
+ return base64Write(this, string, offset, length)
+
+ case 'ucs2':
+ case 'ucs-2':
+ case 'utf16le':
+ case 'utf-16le':
+ return ucs2Write(this, string, offset, length)
+
+ default:
+ if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)
+ encoding = ('' + encoding).toLowerCase()
+ loweredCase = true
+ }
+ }
+}
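+// Editor's example: write() returns the number of bytes written.
+//   const b = Buffer.alloc(8)
+//   b.write('abcd', 2, 'ascii') // => 4; b is <Buffer 00 00 61 62 63 64 00 00>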
+
+Buffer.prototype.toJSON = function toJSON () {
+ return {
+ type: 'Buffer',
+ data: Array.prototype.slice.call(this._arr || this, 0)
+ }
+}
+
+function base64Slice (buf, start, end) {
+ if (start === 0 && end === buf.length) {
+ return base64.fromByteArray(buf)
+ } else {
+ return base64.fromByteArray(buf.slice(start, end))
+ }
+}
+
+function utf8Slice (buf, start, end) {
+ end = Math.min(buf.length, end)
+ var res = []
+
+ var i = start
+ while (i < end) {
+ var firstByte = buf[i]
+ var codePoint = null
+ var bytesPerSequence = (firstByte > 0xEF) ? 4
+ : (firstByte > 0xDF) ? 3
+ : (firstByte > 0xBF) ? 2
+ : 1
+
+ if (i + bytesPerSequence <= end) {
+ var secondByte, thirdByte, fourthByte, tempCodePoint
+
+ switch (bytesPerSequence) {
+ case 1:
+ if (firstByte < 0x80) {
+ codePoint = firstByte
+ }
+ break
+ case 2:
+ secondByte = buf[i + 1]
+ if ((secondByte & 0xC0) === 0x80) {
+ tempCodePoint = (firstByte & 0x1F) << 0x6 | (secondByte & 0x3F)
+ if (tempCodePoint > 0x7F) {
+ codePoint = tempCodePoint
+ }
+ }
+ break
+ case 3:
+ secondByte = buf[i + 1]
+ thirdByte = buf[i + 2]
+ if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80) {
+ tempCodePoint = (firstByte & 0xF) << 0xC | (secondByte & 0x3F) << 0x6 | (thirdByte & 0x3F)
+ if (tempCodePoint > 0x7FF && (tempCodePoint < 0xD800 || tempCodePoint > 0xDFFF)) {
+ codePoint = tempCodePoint
+ }
+ }
+ break
+ case 4:
+ secondByte = buf[i + 1]
+ thirdByte = buf[i + 2]
+ fourthByte = buf[i + 3]
+ if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80 && (fourthByte & 0xC0) === 0x80) {
+ tempCodePoint = (firstByte & 0xF) << 0x12 | (secondByte & 0x3F) << 0xC | (thirdByte & 0x3F) << 0x6 | (fourthByte & 0x3F)
+ if (tempCodePoint > 0xFFFF && tempCodePoint < 0x110000) {
+ codePoint = tempCodePoint
+ }
+ }
+ }
+ }
+
+ if (codePoint === null) {
+ // we did not generate a valid codePoint so insert a
+ // replacement char (U+FFFD) and advance only 1 byte
+ codePoint = 0xFFFD
+ bytesPerSequence = 1
+ } else if (codePoint > 0xFFFF) {
+ // encode to utf16 (surrogate pair dance)
+ codePoint -= 0x10000
+ res.push(codePoint >>> 10 & 0x3FF | 0xD800)
+ codePoint = 0xDC00 | codePoint & 0x3FF
+ }
+
+ res.push(codePoint)
+ i += bytesPerSequence
+ }
+
+ return decodeCodePointsArray(res)
+}
+
+// Based on http://stackoverflow.com/a/22747272/680742, the browser with
+// the lowest limit is Chrome, with 0x10000 args.
+// We go 1 magnitude less, for safety
+var MAX_ARGUMENTS_LENGTH = 0x1000
+
+function decodeCodePointsArray (codePoints) {
+ var len = codePoints.length
+ if (len <= MAX_ARGUMENTS_LENGTH) {
+ return String.fromCharCode.apply(String, codePoints) // avoid extra slice()
+ }
+
+ // Decode in chunks to avoid "call stack size exceeded".
+ var res = ''
+ var i = 0
+ while (i < len) {
+ res += String.fromCharCode.apply(
+ String,
+ codePoints.slice(i, i += MAX_ARGUMENTS_LENGTH)
+ )
+ }
+ return res
+}
+
+function asciiSlice (buf, start, end) {
+ var ret = ''
+ end = Math.min(buf.length, end)
+
+ for (var i = start; i < end; ++i) {
+ ret += String.fromCharCode(buf[i] & 0x7F)
+ }
+ return ret
+}
+
+function latin1Slice (buf, start, end) {
+ var ret = ''
+ end = Math.min(buf.length, end)
+
+ for (var i = start; i < end; ++i) {
+ ret += String.fromCharCode(buf[i])
+ }
+ return ret
+}
+
+function hexSlice (buf, start, end) {
+ var len = buf.length
+
+ if (!start || start < 0) start = 0
+ if (!end || end < 0 || end > len) end = len
+
+ var out = ''
+ for (var i = start; i < end; ++i) {
+ out += toHex(buf[i])
+ }
+ return out
+}
+
+function utf16leSlice (buf, start, end) {
+ var bytes = buf.slice(start, end)
+ var res = ''
+ for (var i = 0; i < bytes.length; i += 2) {
+ res += String.fromCharCode(bytes[i] + bytes[i + 1] * 256)
+ }
+ return res
+}
+
+Buffer.prototype.slice = function slice (start, end) {
+ var len = this.length
+ start = ~~start
+ end = end === undefined ? len : ~~end
+
+ if (start < 0) {
+ start += len
+ if (start < 0) start = 0
+ } else if (start > len) {
+ start = len
+ }
+
+ if (end < 0) {
+ end += len
+ if (end < 0) end = 0
+ } else if (end > len) {
+ end = len
+ }
+
+ if (end < start) end = start
+
+ var newBuf
+ if (Buffer.TYPED_ARRAY_SUPPORT) {
+ newBuf = this.subarray(start, end)
+ newBuf.__proto__ = Buffer.prototype
+ } else {
+ var sliceLen = end - start
+ newBuf = new Buffer(sliceLen, undefined)
+ for (var i = 0; i < sliceLen; ++i) {
+ newBuf[i] = this[i + start]
+ }
+ }
+
+ return newBuf
+}
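+// Editor's example: negative indices count from the end, per the clamping
+// above:
+//   Buffer.from('buffer').slice(-3).toString() // => 'fer'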
+
+/*
+ * Need to make sure that buffer isn't trying to write out of bounds.
+ */
+function checkOffset (offset, ext, length) {
+ if ((offset % 1) !== 0 || offset < 0) throw new RangeError('offset is not uint')
+ if (offset + ext > length) throw new RangeError('Trying to access beyond buffer length')
+}
+
+Buffer.prototype.readUIntLE = function readUIntLE (offset, byteLength, noAssert) {
+ offset = offset | 0
+ byteLength = byteLength | 0
+ if (!noAssert) checkOffset(offset, byteLength, this.length)
+
+ var val = this[offset]
+ var mul = 1
+ var i = 0
+ while (++i < byteLength && (mul *= 0x100)) {
+ val += this[offset + i] * mul
+ }
+
+ return val
+}
+
+Buffer.prototype.readUIntBE = function readUIntBE (offset, byteLength, noAssert) {
+ offset = offset | 0
+ byteLength = byteLength | 0
+ if (!noAssert) {
+ checkOffset(offset, byteLength, this.length)
+ }
+
+ var val = this[offset + --byteLength]
+ var mul = 1
+ while (byteLength > 0 && (mul *= 0x100)) {
+ val += this[offset + --byteLength] * mul
+ }
+
+ return val
+}
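+
+// Worked example for the two variable-length readers above, given the bytes
+// [0x12, 0x34, 0x56] at offset 0:
+//   readUIntLE(0, 3) === 0x12 + 0x34 * 0x100 + 0x56 * 0x10000 === 0x563412
+//   readUIntBE(0, 3) === 0x56 + 0x34 * 0x100 + 0x12 * 0x10000 === 0x123456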
+
+Buffer.prototype.readUInt8 = function readUInt8 (offset, noAssert) {
+ if (!noAssert) checkOffset(offset, 1, this.length)
+ return this[offset]
+}
+
+Buffer.prototype.readUInt16LE = function readUInt16LE (offset, noAssert) {
+ if (!noAssert) checkOffset(offset, 2, this.length)
+ return this[offset] | (this[offset + 1] << 8)
+}
+
+Buffer.prototype.readUInt16BE = function readUInt16BE (offset, noAssert) {
+ if (!noAssert) checkOffset(offset, 2, this.length)
+ return (this[offset] << 8) | this[offset + 1]
+}
+
+Buffer.prototype.readUInt32LE = function readUInt32LE (offset, noAssert) {
+ if (!noAssert) checkOffset(offset, 4, this.length)
+
+ return ((this[offset]) |
+ (this[offset + 1] << 8) |
+ (this[offset + 2] << 16)) +
+ (this[offset + 3] * 0x1000000)
+}
+
+Buffer.prototype.readUInt32BE = function readUInt32BE (offset, noAssert) {
+ if (!noAssert) checkOffset(offset, 4, this.length)
+
+ return (this[offset] * 0x1000000) +
+ ((this[offset + 1] << 16) |
+ (this[offset + 2] << 8) |
+ this[offset + 3])
+}
+
+Buffer.prototype.readIntLE = function readIntLE (offset, byteLength, noAssert) {
+ offset = offset | 0
+ byteLength = byteLength | 0
+ if (!noAssert) checkOffset(offset, byteLength, this.length)
+
+ var val = this[offset]
+ var mul = 1
+ var i = 0
+ while (++i < byteLength && (mul *= 0x100)) {
+ val += this[offset + i] * mul
+ }
+ mul *= 0x80
+
+ if (val >= mul) val -= Math.pow(2, 8 * byteLength)
+
+ return val
+}
+
+Buffer.prototype.readIntBE = function readIntBE (offset, byteLength, noAssert) {
+ offset = offset | 0
+ byteLength = byteLength | 0
+ if (!noAssert) checkOffset(offset, byteLength, this.length)
+
+ var i = byteLength
+ var mul = 1
+ var val = this[offset + --i]
+ while (i > 0 && (mul *= 0x100)) {
+ val += this[offset + --i] * mul
+ }
+ mul *= 0x80
+
+ if (val >= mul) val -= Math.pow(2, 8 * byteLength)
+
+ return val
+}
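+
+// Sign handling in the signed readers above: after the unsigned accumulation,
+// any value at or above 2^(8 * byteLength - 1) is shifted down by
+// 2^(8 * byteLength), i.e. interpreted as two's complement. For one byte:
+//   0xFF >= 0x80, so the result is 0xFF - 0x100 === -1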
+
+Buffer.prototype.readInt8 = function readInt8 (offset, noAssert) {
+ if (!noAssert) checkOffset(offset, 1, this.length)
+ if (!(this[offset] & 0x80)) return (this[offset])
+ return ((0xff - this[offset] + 1) * -1)
+}
+
+Buffer.prototype.readInt16LE = function readInt16LE (offset, noAssert) {
+ if (!noAssert) checkOffset(offset, 2, this.length)
+ var val = this[offset] | (this[offset + 1] << 8)
+ return (val & 0x8000) ? val | 0xFFFF0000 : val
+}
+
+Buffer.prototype.readInt16BE = function readInt16BE (offset, noAssert) {
+ if (!noAssert) checkOffset(offset, 2, this.length)
+ var val = this[offset + 1] | (this[offset] << 8)
+ return (val & 0x8000) ? val | 0xFFFF0000 : val
+}
+
+Buffer.prototype.readInt32LE = function readInt32LE (offset, noAssert) {
+ if (!noAssert) checkOffset(offset, 4, this.length)
+
+ return (this[offset]) |
+ (this[offset + 1] << 8) |
+ (this[offset + 2] << 16) |
+ (this[offset + 3] << 24)
+}
+
+Buffer.prototype.readInt32BE = function readInt32BE (offset, noAssert) {
+ if (!noAssert) checkOffset(offset, 4, this.length)
+
+ return (this[offset] << 24) |
+ (this[offset + 1] << 16) |
+ (this[offset + 2] << 8) |
+ (this[offset + 3])
+}
+
+Buffer.prototype.readFloatLE = function readFloatLE (offset, noAssert) {
+ if (!noAssert) checkOffset(offset, 4, this.length)
+ return ieee754.read(this, offset, true, 23, 4)
+}
+
+Buffer.prototype.readFloatBE = function readFloatBE (offset, noAssert) {
+ if (!noAssert) checkOffset(offset, 4, this.length)
+ return ieee754.read(this, offset, false, 23, 4)
+}
+
+Buffer.prototype.readDoubleLE = function readDoubleLE (offset, noAssert) {
+ if (!noAssert) checkOffset(offset, 8, this.length)
+ return ieee754.read(this, offset, true, 52, 8)
+}
+
+Buffer.prototype.readDoubleBE = function readDoubleBE (offset, noAssert) {
+ if (!noAssert) checkOffset(offset, 8, this.length)
+ return ieee754.read(this, offset, false, 52, 8)
+}
+
+function checkInt (buf, value, offset, ext, max, min) {
+ if (!Buffer.isBuffer(buf)) throw new TypeError('"buffer" argument must be a Buffer instance')
+ if (value > max || value < min) throw new RangeError('"value" argument is out of bounds')
+ if (offset + ext > buf.length) throw new RangeError('Index out of range')
+}
+
+Buffer.prototype.writeUIntLE = function writeUIntLE (value, offset, byteLength, noAssert) {
+ value = +value
+ offset = offset | 0
+ byteLength = byteLength | 0
+ if (!noAssert) {
+ var maxBytes = Math.pow(2, 8 * byteLength) - 1
+ checkInt(this, value, offset, byteLength, maxBytes, 0)
+ }
+
+ var mul = 1
+ var i = 0
+ this[offset] = value & 0xFF
+ while (++i < byteLength && (mul *= 0x100)) {
+ this[offset + i] = (value / mul) & 0xFF
+ }
+
+ return offset + byteLength
+}
+
+Buffer.prototype.writeUIntBE = function writeUIntBE (value, offset, byteLength, noAssert) {
+ value = +value
+ offset = offset | 0
+ byteLength = byteLength | 0
+ if (!noAssert) {
+ var maxBytes = Math.pow(2, 8 * byteLength) - 1
+ checkInt(this, value, offset, byteLength, maxBytes, 0)
+ }
+
+ var i = byteLength - 1
+ var mul = 1
+ this[offset + i] = value & 0xFF
+ while (--i >= 0 && (mul *= 0x100)) {
+ this[offset + i] = (value / mul) & 0xFF
+ }
+
+ return offset + byteLength
+}
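+
+// Worked example for the two writers above: writeUIntLE(0x123456, 0, 3)
+// stores [0x56, 0x34, 0x12] and writeUIntBE(0x123456, 0, 3) stores
+// [0x12, 0x34, 0x56]; both return offset + byteLength (here 3).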
+
+Buffer.prototype.writeUInt8 = function writeUInt8 (value, offset, noAssert) {
+ value = +value
+ offset = offset | 0
+ if (!noAssert) checkInt(this, value, offset, 1, 0xff, 0)
+ if (!Buffer.TYPED_ARRAY_SUPPORT) value = Math.floor(value)
+ this[offset] = (value & 0xff)
+ return offset + 1
+}
+
+function objectWriteUInt16 (buf, value, offset, littleEndian) {
+ if (value < 0) value = 0xffff + value + 1
+ for (var i = 0, j = Math.min(buf.length - offset, 2); i < j; ++i) {
+ buf[offset + i] = (value & (0xff << (8 * (littleEndian ? i : 1 - i)))) >>>
+ (littleEndian ? i : 1 - i) * 8
+ }
+}
+
+Buffer.prototype.writeUInt16LE = function writeUInt16LE (value, offset, noAssert) {
+ value = +value
+ offset = offset | 0
+ if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)
+ if (Buffer.TYPED_ARRAY_SUPPORT) {
+ this[offset] = (value & 0xff)
+ this[offset + 1] = (value >>> 8)
+ } else {
+ objectWriteUInt16(this, value, offset, true)
+ }
+ return offset + 2
+}
+
+Buffer.prototype.writeUInt16BE = function writeUInt16BE (value, offset, noAssert) {
+ value = +value
+ offset = offset | 0
+ if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)
+ if (Buffer.TYPED_ARRAY_SUPPORT) {
+ this[offset] = (value >>> 8)
+ this[offset + 1] = (value & 0xff)
+ } else {
+ objectWriteUInt16(this, value, offset, false)
+ }
+ return offset + 2
+}
+
+function objectWriteUInt32 (buf, value, offset, littleEndian) {
+ if (value < 0) value = 0xffffffff + value + 1
+ for (var i = 0, j = Math.min(buf.length - offset, 4); i < j; ++i) {
+ buf[offset + i] = (value >>> (littleEndian ? i : 3 - i) * 8) & 0xff
+ }
+}
+
+Buffer.prototype.writeUInt32LE = function writeUInt32LE (value, offset, noAssert) {
+ value = +value
+ offset = offset | 0
+ if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)
+ if (Buffer.TYPED_ARRAY_SUPPORT) {
+ this[offset + 3] = (value >>> 24)
+ this[offset + 2] = (value >>> 16)
+ this[offset + 1] = (value >>> 8)
+ this[offset] = (value & 0xff)
+ } else {
+ objectWriteUInt32(this, value, offset, true)
+ }
+ return offset + 4
+}
+
+Buffer.prototype.writeUInt32BE = function writeUInt32BE (value, offset, noAssert) {
+ value = +value
+ offset = offset | 0
+ if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)
+ if (Buffer.TYPED_ARRAY_SUPPORT) {
+ this[offset] = (value >>> 24)
+ this[offset + 1] = (value >>> 16)
+ this[offset + 2] = (value >>> 8)
+ this[offset + 3] = (value & 0xff)
+ } else {
+ objectWriteUInt32(this, value, offset, false)
+ }
+ return offset + 4
+}
+
+Buffer.prototype.writeIntLE = function writeIntLE (value, offset, byteLength, noAssert) {
+ value = +value
+ offset = offset | 0
+ if (!noAssert) {
+ var limit = Math.pow(2, 8 * byteLength - 1)
+
+ checkInt(this, value, offset, byteLength, limit - 1, -limit)
+ }
+
+ var i = 0
+ var mul = 1
+ var sub = 0
+ this[offset] = value & 0xFF
+ while (++i < byteLength && (mul *= 0x100)) {
+ if (value < 0 && sub === 0 && this[offset + i - 1] !== 0) {
+ sub = 1
+ }
+ this[offset + i] = ((value / mul) >> 0) - sub & 0xFF
+ }
+
+ return offset + byteLength
+}
+
+Buffer.prototype.writeIntBE = function writeIntBE (value, offset, byteLength, noAssert) {
+ value = +value
+ offset = offset | 0
+ if (!noAssert) {
+ var limit = Math.pow(2, 8 * byteLength - 1)
+
+ checkInt(this, value, offset, byteLength, limit - 1, -limit)
+ }
+
+ var i = byteLength - 1
+ var mul = 1
+ var sub = 0
+ this[offset + i] = value & 0xFF
+ while (--i >= 0 && (mul *= 0x100)) {
+ if (value < 0 && sub === 0 && this[offset + i + 1] !== 0) {
+ sub = 1
+ }
+ this[offset + i] = ((value / mul) >> 0) - sub & 0xFF
+ }
+
+ return offset + byteLength
+}
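+
+// Sketch of the borrow (`sub`) logic above: negatives are emitted in two's
+// complement one byte at a time. For writeIntLE(-2, 0, 2):
+//   byte 0: -2 & 0xFF === 0xFE
+//   byte 1: ((-2 / 0x100) >> 0) === 0; the previous byte was non-zero, so
+//           sub becomes 1 and (0 - 1) & 0xFF === 0xFF  ->  bytes [0xFE, 0xFF]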
+
+Buffer.prototype.writeInt8 = function writeInt8 (value, offset, noAssert) {
+ value = +value
+ offset = offset | 0
+ if (!noAssert) checkInt(this, value, offset, 1, 0x7f, -0x80)
+ if (!Buffer.TYPED_ARRAY_SUPPORT) value = Math.floor(value)
+ if (value < 0) value = 0xff + value + 1
+ this[offset] = (value & 0xff)
+ return offset + 1
+}
+
+Buffer.prototype.writeInt16LE = function writeInt16LE (value, offset, noAssert) {
+ value = +value
+ offset = offset | 0
+ if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)
+ if (Buffer.TYPED_ARRAY_SUPPORT) {
+ this[offset] = (value & 0xff)
+ this[offset + 1] = (value >>> 8)
+ } else {
+ objectWriteUInt16(this, value, offset, true)
+ }
+ return offset + 2
+}
+
+Buffer.prototype.writeInt16BE = function writeInt16BE (value, offset, noAssert) {
+ value = +value
+ offset = offset | 0
+ if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)
+ if (Buffer.TYPED_ARRAY_SUPPORT) {
+ this[offset] = (value >>> 8)
+ this[offset + 1] = (value & 0xff)
+ } else {
+ objectWriteUInt16(this, value, offset, false)
+ }
+ return offset + 2
+}
+
+Buffer.prototype.writeInt32LE = function writeInt32LE (value, offset, noAssert) {
+ value = +value
+ offset = offset | 0
+ if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)
+ if (Buffer.TYPED_ARRAY_SUPPORT) {
+ this[offset] = (value & 0xff)
+ this[offset + 1] = (value >>> 8)
+ this[offset + 2] = (value >>> 16)
+ this[offset + 3] = (value >>> 24)
+ } else {
+ objectWriteUInt32(this, value, offset, true)
+ }
+ return offset + 4
+}
+
+Buffer.prototype.writeInt32BE = function writeInt32BE (value, offset, noAssert) {
+ value = +value
+ offset = offset | 0
+ if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)
+ if (value < 0) value = 0xffffffff + value + 1
+ if (Buffer.TYPED_ARRAY_SUPPORT) {
+ this[offset] = (value >>> 24)
+ this[offset + 1] = (value >>> 16)
+ this[offset + 2] = (value >>> 8)
+ this[offset + 3] = (value & 0xff)
+ } else {
+ objectWriteUInt32(this, value, offset, false)
+ }
+ return offset + 4
+}
+
+function checkIEEE754 (buf, value, offset, ext, max, min) {
+ if (offset + ext > buf.length) throw new RangeError('Index out of range')
+ if (offset < 0) throw new RangeError('Index out of range')
+}
+
+function writeFloat (buf, value, offset, littleEndian, noAssert) {
+ if (!noAssert) {
+ checkIEEE754(buf, value, offset, 4, 3.4028234663852886e+38, -3.4028234663852886e+38)
+ }
+ ieee754.write(buf, value, offset, littleEndian, 23, 4)
+ return offset + 4
+}
+
+Buffer.prototype.writeFloatLE = function writeFloatLE (value, offset, noAssert) {
+ return writeFloat(this, value, offset, true, noAssert)
+}
+
+Buffer.prototype.writeFloatBE = function writeFloatBE (value, offset, noAssert) {
+ return writeFloat(this, value, offset, false, noAssert)
+}
+
+function writeDouble (buf, value, offset, littleEndian, noAssert) {
+ if (!noAssert) {
+ checkIEEE754(buf, value, offset, 8, 1.7976931348623157E+308, -1.7976931348623157E+308)
+ }
+ ieee754.write(buf, value, offset, littleEndian, 52, 8)
+ return offset + 8
+}
+
+Buffer.prototype.writeDoubleLE = function writeDoubleLE (value, offset, noAssert) {
+ return writeDouble(this, value, offset, true, noAssert)
+}
+
+Buffer.prototype.writeDoubleBE = function writeDoubleBE (value, offset, noAssert) {
+ return writeDouble(this, value, offset, false, noAssert)
+}
+
+// copy(targetBuffer, targetStart=0, sourceStart=0, sourceEnd=buffer.length)
+Buffer.prototype.copy = function copy (target, targetStart, start, end) {
+ if (!start) start = 0
+ if (!end && end !== 0) end = this.length
+ if (targetStart >= target.length) targetStart = target.length
+ if (!targetStart) targetStart = 0
+ if (end > 0 && end < start) end = start
+
+ // Copy 0 bytes; we're done
+ if (end === start) return 0
+ if (target.length === 0 || this.length === 0) return 0
+
+ // Fatal error conditions
+ if (targetStart < 0) {
+ throw new RangeError('targetStart out of bounds')
+ }
+ if (start < 0 || start >= this.length) throw new RangeError('sourceStart out of bounds')
+ if (end < 0) throw new RangeError('sourceEnd out of bounds')
+
+ // Are we out of bounds?
+ if (end > this.length) end = this.length
+ if (target.length - targetStart < end - start) {
+ end = target.length - targetStart + start
+ }
+
+ var len = end - start
+ var i
+
+ if (this === target && start < targetStart && targetStart < end) {
+ // descending copy from end
+ for (i = len - 1; i >= 0; --i) {
+ target[i + targetStart] = this[i + start]
+ }
+ } else if (len < 1000 || !Buffer.TYPED_ARRAY_SUPPORT) {
+ // ascending copy from start
+ for (i = 0; i < len; ++i) {
+ target[i + targetStart] = this[i + start]
+ }
+ } else {
+ Uint8Array.prototype.set.call(
+ target,
+ this.subarray(start, start + len),
+ targetStart
+ )
+ }
+
+ return len
+}
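+
+// Usage sketch for copy(): when source and target overlap and the target
+// starts inside the source range, the descending branch above keeps source
+// bytes from being clobbered mid-copy:
+//   var b = new Buffer([1, 2, 3, 4, 5])
+//   b.copy(b, 1, 0, 4)   // b becomes <Buffer 01 01 02 03 04>, returns 4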
+
+// Usage:
+// buffer.fill(number[, offset[, end]])
+// buffer.fill(buffer[, offset[, end]])
+// buffer.fill(string[, offset[, end]][, encoding])
+Buffer.prototype.fill = function fill (val, start, end, encoding) {
+ // Handle string cases:
+ if (typeof val === 'string') {
+ if (typeof start === 'string') {
+ encoding = start
+ start = 0
+ end = this.length
+ } else if (typeof end === 'string') {
+ encoding = end
+ end = this.length
+ }
+ if (val.length === 1) {
+ var code = val.charCodeAt(0)
+ if (code < 256) {
+ val = code
+ }
+ }
+ if (encoding !== undefined && typeof encoding !== 'string') {
+ throw new TypeError('encoding must be a string')
+ }
+ if (typeof encoding === 'string' && !Buffer.isEncoding(encoding)) {
+ throw new TypeError('Unknown encoding: ' + encoding)
+ }
+ } else if (typeof val === 'number') {
+ val = val & 255
+ }
+
+ // Invalid ranges are not set to a default, so we can range-check early.
+ if (start < 0 || this.length < start || this.length < end) {
+ throw new RangeError('Out of range index')
+ }
+
+ if (end <= start) {
+ return this
+ }
+
+ start = start >>> 0
+ end = end === undefined ? this.length : end >>> 0
+
+ if (!val) val = 0
+
+ var i
+ if (typeof val === 'number') {
+ for (i = start; i < end; ++i) {
+ this[i] = val
+ }
+ } else {
+ var bytes = Buffer.isBuffer(val)
+ ? val
+ : utf8ToBytes(new Buffer(val, encoding).toString())
+ var len = bytes.length
+ for (i = 0; i < end - start; ++i) {
+ this[i + start] = bytes[i % len]
+ }
+ }
+
+ return this
+}
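+
+// Usage sketch for fill(): a numeric value repeats, and a multi-byte string
+// cycles across the range:
+//   new Buffer(5).fill(0)      // <Buffer 00 00 00 00 00>
+//   new Buffer(5).fill('ab')   // <Buffer 61 62 61 62 61>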
+
+// HELPER FUNCTIONS
+// ================
+
+var INVALID_BASE64_RE = /[^+\/0-9A-Za-z-_]/g
+
+function base64clean (str) {
+ // Node strips out invalid characters like \n and \t from the string; base64-js does not
+ str = stringtrim(str).replace(INVALID_BASE64_RE, '')
+ // Node converts strings with length < 2 to ''
+ if (str.length < 2) return ''
+ // Node allows for non-padded base64 strings (missing trailing ===); base64-js does not
+ while (str.length % 4 !== 0) {
+ str = str + '='
+ }
+ return str
+}
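+
+// Worked example: base64clean(' QUJD\n') trims and strips to 'QUJD' (length
+// already a multiple of 4, so no padding is appended), while
+// base64clean('QQ') pads to 'QQ==' so base64-js will accept it.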
+
+function stringtrim (str) {
+ if (str.trim) return str.trim()
+ return str.replace(/^\s+|\s+$/g, '')
+}
+
+function toHex (n) {
+ if (n < 16) return '0' + n.toString(16)
+ return n.toString(16)
+}
+
+function utf8ToBytes (string, units) {
+ units = units || Infinity
+ var codePoint
+ var length = string.length
+ var leadSurrogate = null
+ var bytes = []
+
+ for (var i = 0; i < length; ++i) {
+ codePoint = string.charCodeAt(i)
+
+ // is surrogate component
+ if (codePoint > 0xD7FF && codePoint < 0xE000) {
+ // last char was a lead
+ if (!leadSurrogate) {
+ // no lead yet
+ if (codePoint > 0xDBFF) {
+ // unexpected trail
+ if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)
+ continue
+ } else if (i + 1 === length) {
+ // unpaired lead
+ if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)
+ continue
+ }
+
+ // valid lead
+ leadSurrogate = codePoint
+
+ continue
+ }
+
+ // 2 leads in a row
+ if (codePoint < 0xDC00) {
+ if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)
+ leadSurrogate = codePoint
+ continue
+ }
+
+ // valid surrogate pair
+ codePoint = (leadSurrogate - 0xD800 << 10 | codePoint - 0xDC00) + 0x10000
+ } else if (leadSurrogate) {
+ // valid bmp char, but last char was a lead
+ if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)
+ }
+
+ leadSurrogate = null
+
+ // encode utf8
+ if (codePoint < 0x80) {
+ if ((units -= 1) < 0) break
+ bytes.push(codePoint)
+ } else if (codePoint < 0x800) {
+ if ((units -= 2) < 0) break
+ bytes.push(
+ codePoint >> 0x6 | 0xC0,
+ codePoint & 0x3F | 0x80
+ )
+ } else if (codePoint < 0x10000) {
+ if ((units -= 3) < 0) break
+ bytes.push(
+ codePoint >> 0xC | 0xE0,
+ codePoint >> 0x6 & 0x3F | 0x80,
+ codePoint & 0x3F | 0x80
+ )
+ } else if (codePoint < 0x110000) {
+ if ((units -= 4) < 0) break
+ bytes.push(
+ codePoint >> 0x12 | 0xF0,
+ codePoint >> 0xC & 0x3F | 0x80,
+ codePoint >> 0x6 & 0x3F | 0x80,
+ codePoint & 0x3F | 0x80
+ )
+ } else {
+ throw new Error('Invalid code point')
+ }
+ }
+
+ return bytes
+}
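+
+// Worked example for the surrogate-pair branch above: '\uD83D\uDE00'
+// (U+1F600) recombines as
+//   (0xD83D - 0xD800 << 10 | 0xDE00 - 0xDC00) + 0x10000 === 0x1F600
+// and is emitted as the 4-byte sequence [0xF0, 0x9F, 0x98, 0x80].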
+
+function asciiToBytes (str) {
+ var byteArray = []
+ for (var i = 0; i < str.length; ++i) {
+ // Node's code seems to be doing this and not & 0x7F.
+ byteArray.push(str.charCodeAt(i) & 0xFF)
+ }
+ return byteArray
+}
+
+function utf16leToBytes (str, units) {
+ var c, hi, lo
+ var byteArray = []
+ for (var i = 0; i < str.length; ++i) {
+ if ((units -= 2) < 0) break
+
+ c = str.charCodeAt(i)
+ hi = c >> 8
+ lo = c % 256
+ byteArray.push(lo)
+ byteArray.push(hi)
+ }
+
+ return byteArray
+}
+
+function base64ToBytes (str) {
+ return base64.toByteArray(base64clean(str))
+}
+
+function blitBuffer (src, dst, offset, length) {
+ for (var i = 0; i < length; ++i) {
+ if ((i + offset >= dst.length) || (i >= src.length)) break
+ dst[i + offset] = src[i]
+ }
+ return i
+}
+
+function isnan (val) {
+ return val !== val // eslint-disable-line no-self-compare
+}
+
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(27)))
+
+/***/ }),
+/* 40 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* WEBPACK VAR INJECTION */(function(setImmediate) {/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return nextFrame; });
+/**
+ * @license
+ * Copyright 2017 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const delayCallback = (() => {
+ if (typeof requestAnimationFrame !== 'undefined') {
+ return requestAnimationFrame;
+ }
+ else if (typeof setImmediate !== 'undefined') {
+ return setImmediate;
+ }
+ return (f) => f(); // no delays
+})();
+/**
+ * Returns a promise that resolves when a requestAnimationFrame has completed.
+ *
+ * On Node.js this uses setImmediate instead of requestAnimationFrame.
+ *
+ * This is simply a sugar method so that users can do the following:
+ * `await tf.nextFrame();`
+ */
+/** @doc {heading: 'Performance', subheading: 'Timing'} */
+function nextFrame() {
+ return new Promise(resolve => delayCallback(() => resolve()));
+}
+
+//# sourceMappingURL=browser_util.js.map
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(76).setImmediate))
+
+/***/ }),
+/* 41 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [
+ {
+ 'tfOpName': 'Add',
+ 'category': 'arithmetic',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'AddV2',
+ 'category': 'arithmetic',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'AddN',
+ 'category': 'arithmetic',
+ 'inputs': [{ 'start': 0, 'end': 0, 'name': 'tensors', 'type': 'tensors' }]
+ },
+ {
+ 'tfOpName': 'BiasAdd',
+ 'category': 'arithmetic',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Sub',
+ 'category': 'arithmetic',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'RealDiv',
+ 'category': 'arithmetic',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Div',
+ 'category': 'arithmetic',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'DivNoNan',
+ 'category': 'arithmetic',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'FloorDiv',
+ 'category': 'arithmetic',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Mul',
+ 'category': 'arithmetic',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Maximum',
+ 'category': 'arithmetic',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' }
+ ]
+ },
+ {
+ 'tfOpName': 'Minimum',
+ 'category': 'arithmetic',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' }
+ ]
+ },
+ {
+ 'tfOpName': 'Pow',
+ 'category': 'arithmetic',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'SquaredDifference',
+ 'category': 'arithmetic',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Mod',
+ 'category': 'arithmetic',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'FloorMod',
+ 'category': 'arithmetic',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ }
+];
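+
+// Note on this mapping format (shared by the op lists in the modules that
+// follow): each entry tells the graph executor how to build a tfjs op from a
+// TensorFlow GraphDef node. 'inputs' are selected by position ('start' is the
+// input index; a 'start'/'end' pair with 'end' 0 appears to mean "through the
+// last input"), and 'attrs' map TF attribute keys ('tfName') onto tfjs
+// parameter names, with optional 'defaultValue' and 'notSupported' flags.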
+//# sourceMappingURL=arithmetic.js.map
+
+/***/ }),
+/* 42 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [
+ {
+ 'tfOpName': 'Abs',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Acos',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Asin',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Atan',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Atan2',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'y', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Ceil',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'ClipByValue',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'clip_value_min', 'name': 'clipValueMin', 'type': 'number' },
+ { 'tfName': 'clip_value_max', 'name': 'clipValueMax', 'type': 'number' }
+ ]
+ },
+ {
+ 'tfOpName': 'Complex',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'real', 'type': 'tensor' },
+ { 'start': 1, 'name': 'imag', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'ComplexAbs',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Cos',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Cosh',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Elu',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Exp',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Floor',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Log',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Imag',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }, {
+ 'tfName': 'Tout',
+ 'name': 'outputType',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Neg',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Real',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }, {
+ 'tfName': 'Tout',
+ 'name': 'outputType',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Prelu',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'alpha', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Relu',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Relu6',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }, {
+ 'tfName': 'clipValueMin',
+ 'name': 'clipValueMin',
+ 'type': 'number',
+ 'defaultValue': 0
+ },
+ {
+ 'tfName': 'clipValueMax',
+ 'name': 'clipValueMax',
+ 'type': 'number',
+ 'defaultValue': 6
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Selu',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Sigmoid',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Sin',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Sinh',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Sqrt',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Rsqrt',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Square',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Tan',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Tanh',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Sign',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Round',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Expm1',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Log1p',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Reciprocal',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Softplus',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Asinh',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Acosh',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Atanh',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Erf',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Prod',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'axes', 'type': 'number[]' },
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'keep_dims',
+ 'name': 'keepDims',
+ 'type': 'bool',
+ 'notSupported': true
+ },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'LeakyRelu',
+ 'category': 'basic_math',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'alpha',
+ 'name': 'alpha',
+ 'type': 'number',
+ 'defaultValue': 0.2
+ },
+ {
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }
+ ]
+ }
+];
+//# sourceMappingURL=basic_math.js.map
+
+/***/ }),
+/* 43 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [
+ {
+ 'tfOpName': 'LoopCond',
+ 'category': 'control',
+ 'inputs': [{ 'start': 0, 'name': 'pred', 'type': 'tensor' }]
+ },
+ {
+ 'tfOpName': 'Switch',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'name': 'data', 'type': 'tensor' },
+ { 'start': 1, 'name': 'pred', 'type': 'tensor' }
+ ]
+ },
+ {
+ 'tfOpName': 'Merge',
+ 'category': 'control',
+ 'inputs': [{ 'start': 0, 'end': 0, 'name': 'tensors', 'type': 'tensors' }]
+ },
+ {
+ 'tfOpName': 'Enter',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'name': 'tensor', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true },
+ { 'tfName': 'frame_name', 'name': 'frameName', 'type': 'string' },
+ { 'tfName': 'is_constant', 'name': 'isConstant', 'type': 'bool' }
+ ]
+ },
+ {
+ 'tfOpName': 'Exit',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'name': 'tensor', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'NextIteration',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'name': 'tensor', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorArrayV3',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'name': 'size', 'type': 'number' },
+ ],
+ 'attrs': [
+ { 'tfName': 'dtype', 'name': 'dtype', 'type': 'dtype' },
+ { 'tfName': 'element_shape', 'name': 'elementShape', 'type': 'shape' },
+ { 'tfName': 'dynamic_size', 'name': 'dynamicSize', 'type': 'bool' },
+ { 'tfName': 'clear_after_read', 'name': 'clearAfterRead', 'type': 'bool' },
+ {
+ 'tfName': 'identical_element_shapes',
+ 'name': 'identicalElementShapes',
+ 'type': 'bool'
+ },
+ { 'tfName': 'tensor_array_name', 'name': 'name', 'type': 'string' }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorArrayWriteV3',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'name': 'tensorArrayId', 'type': 'number' },
+ { 'start': 1, 'name': 'index', 'type': 'number' },
+ { 'start': 2, 'name': 'tensor', 'type': 'tensor' },
+ { 'start': 3, 'name': 'flowIn', 'type': 'number' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorArrayReadV3',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'name': 'tensorArrayId', 'type': 'number' },
+ { 'start': 1, 'name': 'index', 'type': 'number' },
+ { 'start': 2, 'name': 'flowIn', 'type': 'number' },
+ ],
+ 'attrs': [{
+ 'tfName': 'dtype',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }]
+ },
+ {
+ 'tfOpName': 'TensorArrayGatherV3',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'name': 'tensorArrayId', 'type': 'number' },
+ { 'start': 1, 'name': 'indices', 'type': 'number[]' },
+ { 'start': 2, 'name': 'flowIn', 'type': 'number' },
+ ],
+ 'attrs': [
+ { 'tfName': 'dtype', 'name': 'dtype', 'type': 'dtype' },
+ { 'tfName': 'element_shape', 'name': 'elementShape', 'type': 'shape' }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorArrayScatterV3',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'name': 'tensorArrayId', 'type': 'number' },
+ { 'start': 1, 'name': 'indices', 'type': 'number[]' },
+ { 'start': 2, 'name': 'tensor', 'type': 'tensor' },
+ { 'start': 3, 'name': 'flowIn', 'type': 'number' },
+ ],
+ 'attrs': [{ 'tfName': 'T', 'name': 'dtype', 'type': 'dtype' }]
+ },
+ {
+ 'tfOpName': 'TensorArrayConcatV3',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'name': 'tensorArrayId', 'type': 'number' },
+ { 'start': 1, 'name': 'flowIn', 'type': 'number' },
+ ],
+ 'attrs': [
+ { 'tfName': 'dtype', 'name': 'dtype', 'type': 'dtype' }, {
+ 'tfName': 'element_shape_except0',
+ 'name': 'elementShapeExcept0',
+ 'type': 'shape',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorArraySplitV3',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'name': 'tensorArrayId', 'type': 'number' },
+ { 'start': 1, 'name': 'tensor', 'type': 'tensor' },
+ { 'start': 2, 'name': 'lengths', 'type': 'number[]' },
+ { 'start': 3, 'name': 'flowIn', 'type': 'number' },
+ ],
+ 'attrs': [{ 'tfName': 'T', 'name': 'dtype', 'type': 'dtype' }]
+ },
+ {
+ 'tfOpName': 'TensorArraySizeV3',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'name': 'tensorArrayId', 'type': 'number' },
+ { 'start': 1, 'name': 'flowIn', 'type': 'number' }
+ ]
+ },
+ {
+ 'tfOpName': 'TensorArrayCloseV3',
+ 'category': 'control',
+ 'inputs': [{ 'start': 0, 'name': 'tensorArrayId', 'type': 'number' }]
+ },
+ {
+ 'tfOpName': 'StatelessIf',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'name': 'cond', 'type': 'tensor' },
+ { 'start': 1, 'end': 0, 'name': 'args', 'type': 'tensors' }
+ ],
+ 'attrs': [
+ { 'tfName': 'then_branch', 'name': 'thenBranch', 'type': 'func' },
+ { 'tfName': 'else_branch', 'name': 'elseBranch', 'type': 'func' }
+ ]
+ },
+ {
+ 'tfOpName': 'If',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'name': 'cond', 'type': 'tensor' },
+ { 'start': 1, 'end': 0, 'name': 'args', 'type': 'tensors' }
+ ],
+ 'attrs': [
+ { 'tfName': 'then_branch', 'name': 'thenBranch', 'type': 'func' },
+ { 'tfName': 'else_branch', 'name': 'elseBranch', 'type': 'func' }
+ ]
+ },
+ {
+ 'tfOpName': 'StatelessWhile',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'end': 0, 'name': 'args', 'type': 'tensors' },
+ ],
+ 'attrs': [
+ { 'tfName': 'cond', 'name': 'cond', 'type': 'func' },
+ { 'tfName': 'body', 'name': 'body', 'type': 'func' }
+ ]
+ },
+ {
+ 'tfOpName': 'While',
+ 'category': 'control',
+ 'inputs': [
+ { 'start': 0, 'end': 0, 'name': 'args', 'type': 'tensors' },
+ ],
+ 'attrs': [
+ { 'tfName': 'cond', 'name': 'cond', 'type': 'func' },
+ { 'tfName': 'body', 'name': 'body', 'type': 'func' }
+ ]
+ }
+];
+//# sourceMappingURL=control.js.map
+
+/***/ }),
+/* 44 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [
+ {
+ 'tfOpName': 'AvgPool',
+ 'category': 'convolution',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'strides', 'name': 'strides', 'type': 'number[]' },
+ { 'tfName': 'padding', 'name': 'pad', 'type': 'string' }, {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ },
+ { 'tfName': 'ksize', 'name': 'kernelSize', 'type': 'number[]' },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'MaxPool',
+ 'category': 'convolution',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'strides', 'name': 'strides', 'type': 'number[]' },
+ { 'tfName': 'padding', 'name': 'pad', 'type': 'string' }, {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ },
+ { 'tfName': 'ksize', 'name': 'kernelSize', 'type': 'number[]' },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'MaxPoolWithArgmax',
+ 'category': 'convolution',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'strides', 'name': 'strides', 'type': 'number[]' },
+ { 'tfName': 'padding', 'name': 'pad', 'type': 'string' },
+ { 'tfName': 'ksize', 'name': 'kernelSize', 'type': 'number[]' }, {
+ 'tfName': 'include_batch_in_index',
+ 'name': 'includeBatchInIndex',
+ 'type': 'bool'
+ },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'AvgPool3D',
+ 'category': 'convolution',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'strides', 'name': 'strides', 'type': 'number[]' },
+ { 'tfName': 'padding', 'name': 'pad', 'type': 'string' }, {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ },
+ { 'tfName': 'ksize', 'name': 'kernelSize', 'type': 'number[]' },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'MaxPool3D',
+ 'category': 'convolution',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'strides', 'name': 'strides', 'type': 'number[]' },
+ { 'tfName': 'padding', 'name': 'pad', 'type': 'string' }, {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ },
+ { 'tfName': 'ksize', 'name': 'kernelSize', 'type': 'number[]' },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Conv1D',
+ 'category': 'convolution',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'filter', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'stride', 'name': 'stride', 'type': 'number' },
+ { 'tfName': 'padding', 'name': 'pad', 'type': 'string' }, {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'defaultValue': 'NWC'
+ },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }, {
+ 'tfName': 'dilation',
+ 'name': 'dilation',
+ 'type': 'number',
+ 'defaultValue': 1
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Conv2D',
+ 'category': 'convolution',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'filter', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true },
+ { 'tfName': 'strides', 'name': 'strides', 'type': 'number[]' },
+ { 'tfName': 'padding', 'name': 'pad', 'type': 'string' },
+ { 'tfName': 'useCudnnOnGpu', 'name': 'useCudnnOnGpu', 'type': 'bool' }, {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'defaultValue': 'NHWC'
+ },
+ {
+ 'tfName': 'explicit_paddings',
+ 'name': 'explicitPaddings',
+ 'type': 'number[]',
+ 'defaultValue': []
+ },
+ { 'tfName': 'dilations', 'name': 'dilations', 'type': 'number[]' }
+ ]
+ },
+ {
+ 'tfOpName': '_FusedConv2D',
+ 'category': 'convolution',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'filter', 'type': 'tensor' },
+ { 'start': 2, 'end': 0, 'name': 'args', 'type': 'tensors' },
+ ],
+ 'attrs': [
+ { 'tfName': 'num_args', 'name': 'numArgs', 'type': 'number' },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true },
+ { 'tfName': 'strides', 'name': 'strides', 'type': 'number[]' },
+ { 'tfName': 'padding', 'name': 'pad', 'type': 'string' },
+ {
+ 'tfName': 'explicit_paddings',
+ 'name': 'explicitPaddings',
+ 'type': 'number[]',
+ 'defaultValue': []
+ },
+ {
+ 'tfName': 'use_cudnn_on_gpu',
+ 'name': 'useCudnnOnGpu',
+ 'type': 'bool',
+ 'defaultValue': true
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'defaultValue': 'NHWC'
+ },
+ {
+ 'tfName': 'dilations',
+ 'name': 'dilations',
+ 'type': 'number[]',
+ 'defaultValue': [1, 1, 1, 1]
+ },
+ {
+ 'tfName': 'fused_ops',
+ 'name': 'fusedOps',
+ 'type': 'string[]',
+ 'defaultValue': []
+ },
+ {
+ 'tfName': 'epsilon',
+ 'name': 'epsilon',
+ 'type': 'number',
+ 'defaultValue': 0.0001
+ },
+ ]
+ },
+ {
+ 'tfOpName': 'Conv2DBackpropInput',
+ 'category': 'convolution',
+ 'inputs': [
+ { 'start': 2, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'filter', 'type': 'tensor' },
+ { 'start': 0, 'name': 'outputShape', 'type': 'number[]' },
+ ],
+ 'attrs': [
+ { 'tfName': 'strides', 'name': 'strides', 'type': 'number[]' },
+ { 'tfName': 'padding', 'name': 'pad', 'type': 'string' },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'explicit_paddings',
+ 'name': 'explicitPaddings',
+ 'type': 'number[]',
+ 'defaultValue': []
+ },
+ ]
+ },
+ {
+ 'tfOpName': 'DepthwiseConv2d',
+ 'category': 'convolution',
+ 'inputs': [
+ { 'start': 0, 'name': 'input', 'type': 'tensor' },
+ { 'start': 1, 'name': 'filter', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'strides', 'name': 'strides', 'type': 'number[]' },
+ { 'tfName': 'padding', 'name': 'pad', 'type': 'string' }, {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'defaultValue': 'NHWC'
+ },
+ {
+ 'tfName': 'explicit_paddings',
+ 'name': 'explicitPaddings',
+ 'type': 'number[]',
+ 'defaultValue': []
+ },
+ { 'tfName': 'dilations', 'name': 'dilations', 'type': 'number[]' }
+ ]
+ },
+ {
+ 'tfOpName': 'DepthwiseConv2dNative',
+ 'category': 'convolution',
+ 'inputs': [
+ { 'start': 0, 'name': 'input', 'type': 'tensor' },
+ { 'start': 1, 'name': 'filter', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'strides', 'name': 'strides', 'type': 'number[]' },
+ { 'tfName': 'padding', 'name': 'pad', 'type': 'string' }, {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'defaultValue': 'NHWC'
+ },
+ {
+ 'tfName': 'explicit_paddings',
+ 'name': 'explicitPaddings',
+ 'type': 'number[]',
+ 'defaultValue': []
+ },
+ { 'tfName': 'dilations', 'name': 'dilations', 'type': 'number[]' }
+ ]
+ },
+ {
+ 'tfOpName': 'FusedDepthwiseConv2dNative',
+ 'category': 'convolution',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'filter', 'type': 'tensor' },
+ { 'start': 2, 'end': 0, 'name': 'args', 'type': 'tensors' },
+ ],
+ 'attrs': [
+ { 'tfName': 'num_args', 'name': 'numArgs', 'type': 'number' },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true },
+ { 'tfName': 'strides', 'name': 'strides', 'type': 'number[]' },
+ { 'tfName': 'padding', 'name': 'pad', 'type': 'string' }, {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'defaultValue': 'NHWC'
+ },
+ {
+ 'tfName': 'dilations',
+ 'name': 'dilations',
+ 'type': 'number[]',
+ 'defaultValue': [1, 1, 1, 1]
+ },
+ {
+ 'tfName': 'fused_ops',
+ 'name': 'fusedOps',
+ 'type': 'string[]',
+ 'defaultValue': []
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Conv3D',
+ 'category': 'convolution',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'filter', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'strides', 'name': 'strides', 'type': 'number[]' },
+ { 'tfName': 'padding', 'name': 'pad', 'type': 'string' }, {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'defaultValue': 'NHWC'
+ },
+ { 'tfName': 'dilations', 'name': 'dilations', 'type': 'number[]' }
+ ],
+ }
+];
+//# sourceMappingURL=convolution.js.map
+
+/***/ }),
+/* 45 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [
+ {
+ 'tfOpName': 'Fill',
+ 'category': 'creation',
+ 'inputs': [
+ { 'start': 0, 'name': 'shape', 'type': 'number[]' },
+ { 'start': 1, 'name': 'value', 'type': 'number' },
+ ],
+ 'attrs': [{ 'tfName': 'T', 'name': 'dtype', 'type': 'dtype' }]
+ },
+ {
+ 'tfOpName': 'LinSpace',
+ 'category': 'creation',
+ 'inputs': [
+ { 'start': 0, 'name': 'start', 'type': 'number' },
+ { 'start': 1, 'name': 'stop', 'type': 'number' },
+ { 'start': 2, 'name': 'num', 'type': 'number' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'OneHot',
+ 'category': 'creation',
+ 'inputs': [
+ { 'start': 0, 'name': 'indices', 'type': 'tensor' },
+ { 'start': 1, 'name': 'depth', 'type': 'number' },
+ { 'start': 2, 'name': 'onValue', 'type': 'number', 'defaultValue': 1 },
+ { 'start': 3, 'name': 'offValue', 'type': 'number', 'defaultValue': 0 },
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'axis',
+ 'name': 'axis',
+ 'type': 'number',
+ 'notSupported': true
+ },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Ones',
+ 'category': 'creation',
+ 'inputs': [
+ { 'start': 0, 'name': 'shape', 'type': 'number[]' },
+ ],
+ 'attrs': [{ 'tfName': 'T', 'name': 'dtype', 'type': 'dtype' }]
+ },
+ {
+ 'tfOpName': 'OnesLike',
+ 'category': 'creation',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [{ 'tfName': 'dtype', 'name': 'dtype', 'type': 'dtype' }]
+ },
+ {
+ 'tfOpName': 'RandomUniform',
+ 'category': 'creation',
+ 'inputs': [
+ { 'start': 0, 'name': 'shape', 'type': 'number[]' },
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'minval',
+ 'name': 'minval',
+ 'type': 'number',
+ 'defaultValue': 0
+ },
+ {
+ 'tfName': 'maxval',
+ 'name': 'maxval',
+ 'type': 'number',
+ 'defaultValue': 1
+ },
+ { 'tfName': 'dtype', 'name': 'dtype', 'type': 'dtype' },
+ { 'tfName': 'seed', 'name': 'seed', 'type': 'number', 'defaultValue': 0 }, {
+ 'tfName': 'seed2',
+ 'name': 'seed2',
+ 'type': 'number',
+ 'defaultValue': 0,
+ 'notSupported': true
+ },
+ { 'tfName': 'T', 'name': 'T', 'type': 'number', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Range',
+ 'category': 'creation',
+ 'inputs': [
+ { 'start': 0, 'name': 'start', 'type': 'number' },
+ { 'start': 1, 'name': 'stop', 'type': 'number' },
+ { 'start': 2, 'name': 'step', 'type': 'number', 'defaultValue': 0 },
+ ],
+ 'attrs': [{ 'tfName': 'Tidx', 'name': 'dtype', 'type': 'dtype' }]
+ },
+ {
+ 'tfOpName': 'TruncatedNormal',
+ 'category': 'creation',
+ 'inputs': [
+ { 'start': 0, 'name': 'shape', 'type': 'number[]' },
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'means',
+ 'name': 'mean',
+ 'type': 'number',
+ 'defaultValue': 0.0
+ },
+ {
+ 'tfName': 'stddev',
+ 'name': 'stdDev',
+ 'type': 'number',
+ 'defaultValue': 1.0
+ },
+ { 'tfName': 'seed', 'name': 'seed', 'type': 'number' }, {
+ 'tfName': 'seed2',
+ 'name': 'seed2',
+ 'type': 'number',
+ 'defaultValue': 0,
+ 'notSupported': true
+ },
+ { 'tfName': 'dtype', 'name': 'dtype', 'type': 'dtype' },
+ { 'tfName': 'T', 'name': 'T', 'type': 'number', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Zeros',
+ 'category': 'creation',
+ 'inputs': [
+ { 'start': 0, 'name': 'shape', 'type': 'number[]' },
+ ],
+ 'attrs': [{ 'tfName': 'T', 'name': 'dtype', 'type': 'dtype' }]
+ },
+ {
+ 'tfOpName': 'ZerosLike',
+ 'category': 'creation',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [{ 'tfName': 'T', 'name': 'dtype', 'type': 'dtype' }]
+ },
+ {
+ 'tfOpName': 'Multinomial',
+ 'category': 'creation',
+ 'inputs': [
+ { 'start': 0, 'name': 'logits', 'type': 'tensor' },
+ { 'start': 1, 'name': 'numSamples', 'type': 'number' },
+ ],
+ 'attrs': [
+ { 'tfName': 'seed', 'name': 'seed', 'type': 'number' },
+ { 'tfName': 'seed2', 'name': 'seed2', 'type': 'number' },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype' },
+ { 'tfName': 'output_dtype', 'name': 'output_dtype', 'type': 'dtype' }
+ ]
+ }
+];
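+// A minimal usage sketch of a few creation ops these mappings target,
+// assuming the bundled `tf` namespace (shapes and values are illustrative):
+//
+//   tf.fill([2, 2], 9).print();                          // 'Fill'
+//   tf.linspace(0, 1, 5).print();                        // 'LinSpace'
+//   tf.oneHot(tf.tensor1d([0, 2], 'int32'), 3).print();  // 'OneHot'
+//   tf.randomUniform([2, 2], 0, 1).print();              // 'RandomUniform'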
+//# sourceMappingURL=creation.js.map
+
+/***/ }),
+/* 46 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [
+ {
+ 'tfOpName': 'NonMaxSuppressionV2',
+ 'category': 'dynamic',
+ 'inputs': [
+ { 'start': 0, 'name': 'boxes', 'type': 'tensor' },
+ { 'start': 1, 'name': 'scores', 'type': 'tensor' },
+ { 'start': 2, 'name': 'maxOutputSize', 'type': 'number' },
+ { 'start': 3, 'name': 'iouThreshold', 'type': 'number' }
+ ]
+ },
+ {
+ 'tfOpName': 'NonMaxSuppressionV3',
+ 'category': 'dynamic',
+ 'inputs': [
+ { 'start': 0, 'name': 'boxes', 'type': 'tensor' },
+ { 'start': 1, 'name': 'scores', 'type': 'tensor' },
+ { 'start': 2, 'name': 'maxOutputSize', 'type': 'number' },
+ { 'start': 3, 'name': 'iouThreshold', 'type': 'number' },
+ { 'start': 4, 'name': 'scoreThreshold', 'type': 'number' }
+ ]
+ },
+ {
+ 'tfOpName': 'NonMaxSuppressionV5',
+ 'category': 'dynamic',
+ 'inputs': [
+ { 'start': 0, 'name': 'boxes', 'type': 'tensor' },
+ { 'start': 1, 'name': 'scores', 'type': 'tensor' },
+ { 'start': 2, 'name': 'maxOutputSize', 'type': 'number' },
+ { 'start': 3, 'name': 'iouThreshold', 'type': 'number' },
+ { 'start': 4, 'name': 'scoreThreshold', 'type': 'number' },
+ { 'start': 5, 'name': 'softNmsSigma', 'type': 'number' }
+ ]
+ },
+ {
+ 'tfOpName': 'Where',
+ 'category': 'dynamic',
+ 'inputs': [
+ { 'start': 0, 'name': 'condition', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'ListDiff',
+ 'category': 'dynamic',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'y', 'type': 'tensor' },
+ ],
+ 'attrs': [{
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }]
+ }
+];
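+// These "dynamic" ops produce value-dependent output shapes, so their tfjs
+// counterparts are async. A sketch, assuming the bundled `tf` namespace
+// (to be run inside an async function):
+//
+//   const boxes = tf.tensor2d([[0, 0, 1, 1], [0, 0, 0.9, 0.9]]);
+//   const scores = tf.tensor1d([0.9, 0.8]);
+//   const keep = await tf.image.nonMaxSuppressionAsync(boxes, scores, 1, 0.5);
+//   const idx = await tf.whereAsync(tf.tensor1d([0, 1, 1], 'bool'));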
+//# sourceMappingURL=dynamic.js.map
+
+/***/ }),
+/* 47 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [{
+ 'tfOpName': 'TopKV2',
+ 'category': 'evaluation',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'k', 'type': 'number' },
+ ],
+ 'attrs': [{ 'tfName': 'sorted', 'name': 'sorted', 'type': 'bool' }]
+ }];
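+// 'TopKV2' lands on tf.topk. A sketch, assuming the bundled `tf` namespace:
+//
+//   const { values, indices } = tf.topk(tf.tensor1d([1, 9, 4]), 2, true);
+//   values.print();   // [9, 4]
+//   indices.print();  // [1, 2]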
+//# sourceMappingURL=evaluation.js.map
+
+/***/ }),
+/* 48 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [
+ {
+ 'tfOpName': 'PlaceholderWithDefault',
+ 'category': 'graph',
+ 'inputs': [
+ { 'start': 0, 'name': 'default', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'shape', 'name': 'shape', 'type': 'shape' },
+ { 'tfName': 'dtype', 'name': 'dtype', 'type': 'dtype' }
+ ]
+ },
+ {
+ 'tfOpName': 'Placeholder',
+ 'category': 'graph',
+ 'attrs': [
+ { 'tfName': 'shape', 'name': 'shape', 'type': 'shape' },
+ { 'tfName': 'dtype', 'name': 'dtype', 'type': 'dtype' }
+ ]
+ },
+ { 'tfOpName': 'Const', 'category': 'graph' }, {
+ 'tfOpName': 'Identity',
+ 'category': 'graph',
+ 'inputs': [{ 'start': 0, 'name': 'x', 'type': 'tensor' }]
+ },
+ {
+ 'tfOpName': 'IdentityN',
+ 'category': 'graph',
+ 'inputs': [{ 'start': 0, 'end': 0, 'name': 'x', 'type': 'tensors' }]
+ },
+ {
+ 'tfOpName': 'Snapshot',
+ 'category': 'graph',
+ 'inputs': [{ 'start': 0, 'name': 'x', 'type': 'tensor' }]
+ },
+ {
+ 'tfOpName': 'Rank',
+ 'category': 'graph',
+ 'inputs': [{ 'start': 0, 'name': 'x', 'type': 'tensor' }]
+ },
+ {
+ 'tfOpName': 'Size',
+ 'category': 'graph',
+ 'inputs': [{ 'start': 0, 'name': 'x', 'type': 'tensor' }]
+ },
+ {
+ 'tfOpName': 'Shape',
+ 'category': 'graph',
+ 'inputs': [{ 'start': 0, 'name': 'x', 'type': 'tensor' }]
+ },
+ {
+ 'tfOpName': 'ShapeN',
+ 'category': 'graph',
+ 'inputs': [{ 'start': 0, 'end': 0, 'name': 'x', 'type': 'tensors' }]
+ },
+ {
+ 'tfOpName': 'Print',
+ 'category': 'graph',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'data', 'type': 'tensors' },
+ ],
+ 'attrs': [
+ { 'tfName': 'message', 'name': 'message', 'type': 'string' }, {
+ 'tfName': 'first_n',
+ 'name': 'firstN',
+ 'type': 'number',
+ 'notSupported': true
+ },
+ {
+ 'tfName': 'summarize',
+ 'name': 'summarize',
+ 'type': 'number',
+ 'defaultValue': 3
+ }
+ ]
+ },
+ { 'tfOpName': 'NoOp', 'category': 'graph', 'inputs': [] }, {
+ 'tfOpName': 'StopGradient',
+ 'category': 'graph',
+ 'inputs': [{ 'start': 0, 'name': 'x', 'type': 'tensor' }]
+ },
+ {
+ 'tfOpName': 'FakeQuantWithMinMaxVars',
+ 'category': 'graph',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'min', 'name': 'min', 'type': 'number' },
+ { 'tfName': 'max', 'name': 'max', 'type': 'number' }
+ ]
+ }
+];
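+// Graph ops are consumed by the graph executor rather than by a single
+// kernel; e.g. 'Placeholder' values are supplied at execution time. A sketch
+// (MODEL_URL and the input name 'x' are hypothetical):
+//
+//   const model = await tfconv.loadGraphModel(MODEL_URL);
+//   const out = model.execute({ x: tf.zeros([1, 3]) });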
+//# sourceMappingURL=graph.js.map
+
+/***/ }),
+/* 49 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [
+ {
+ 'tfOpName': 'ResizeBilinear',
+ 'category': 'image',
+ 'inputs': [
+ { 'start': 0, 'name': 'images', 'type': 'tensor' },
+ { 'start': 1, 'name': 'size', 'type': 'number[]' },
+ ],
+ 'attrs': [
+ { 'tfName': 'align_corners', 'name': 'alignCorners', 'type': 'bool' },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'ResizeNearestNeighbor',
+ 'category': 'image',
+ 'inputs': [
+ { 'start': 0, 'name': 'images', 'type': 'tensor' },
+ { 'start': 1, 'name': 'size', 'type': 'number[]' },
+ ],
+ 'attrs': [
+ { 'tfName': 'align_corners', 'name': 'alignCorners', 'type': 'bool' },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'CropAndResize',
+ 'category': 'image',
+ 'inputs': [
+ { 'start': 0, 'name': 'image', 'type': 'tensor' },
+ { 'start': 1, 'name': 'boxes', 'type': 'tensor' },
+ { 'start': 2, 'name': 'boxInd', 'type': 'tensor' },
+ { 'start': 3, 'name': 'cropSize', 'type': 'number[]' },
+ ],
+ 'attrs': [
+ { 'tfName': 'method', 'name': 'method', 'type': 'string' }, {
+ 'tfName': 'extrapolation_value',
+ 'name': 'extrapolationValue',
+ 'type': 'number'
+ }
+ ]
+ }
+];
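+// A sketch of the image ops these entries map to, assuming the bundled `tf`
+// namespace (the 1x4x4x1 image is illustrative):
+//
+//   const img = tf.zeros([1, 4, 4, 1]);
+//   tf.image.resizeBilinear(img, [8, 8], /* alignCorners= */ false);
+//   tf.image.resizeNearestNeighbor(img, [8, 8]);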
+//# sourceMappingURL=image.js.map
+
+/***/ }),
+/* 50 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [
+ {
+ 'tfOpName': 'Equal',
+ 'category': 'logical',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'NotEqual',
+ 'category': 'logical',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Greater',
+ 'category': 'logical',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'GreaterEqual',
+ 'category': 'logical',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Less',
+ 'category': 'logical',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'LessEqual',
+ 'category': 'logical',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'LogicalAnd',
+ 'category': 'logical',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'LogicalNot',
+ 'category': 'logical',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'LogicalOr',
+ 'category': 'logical',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Select',
+ 'category': 'logical',
+ 'inputs': [
+ { 'start': 0, 'name': 'condition', 'type': 'tensor' },
+ { 'start': 1, 'name': 'a', 'type': 'tensor' },
+ { 'start': 2, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'SelectV2',
+ 'category': 'logical',
+ 'inputs': [
+ { 'start': 0, 'name': 'condition', 'type': 'tensor' },
+ { 'start': 1, 'name': 'a', 'type': 'tensor' },
+ { 'start': 2, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [{
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }]
+ }
+];
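+// A sketch of the logical ops, assuming the bundled `tf` namespace. Note
+// that TF's 'Select'/'SelectV2' land on tf.where:
+//
+//   const a = tf.tensor1d([1, 2]);
+//   const b = tf.tensor1d([2, 2]);
+//   tf.equal(a, b).print();                  // [false, true]
+//   tf.where(tf.equal(a, b), a, b).print();  // [2, 2]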
+//# sourceMappingURL=logical.js.map
+
+/***/ }),
+/* 51 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [
+ {
+ 'tfOpName': '_FusedMatMul',
+ 'category': 'matrices',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ { 'start': 2, 'end': 0, 'name': 'args', 'type': 'tensors' },
+ ],
+ 'attrs': [
+ { 'tfName': 'num_args', 'name': 'numArgs', 'type': 'number' }, {
+ 'tfName': 'fused_ops',
+ 'name': 'fusedOps',
+ 'type': 'string[]',
+ 'defaultValue': []
+ },
+ {
+ 'tfName': 'epsilon',
+ 'name': 'epsilon',
+ 'type': 'number',
+ 'defaultValue': 0.0001
+ },
+ {
+ 'tfName': 'transpose_a',
+ 'name': 'transposeA',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ {
+ 'tfName': 'transpose_b',
+ 'name': 'transposeB',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'MatMul',
+ 'category': 'matrices',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'transpose_a',
+ 'name': 'transposeA',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ {
+ 'tfName': 'transpose_b',
+ 'name': 'transposeB',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'BatchMatMul',
+ 'category': 'matrices',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'adj_x',
+ 'name': 'transposeA',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ {
+ 'tfName': 'adj_y',
+ 'name': 'transposeB',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'BatchMatMulV2',
+ 'category': 'matrices',
+ 'inputs': [
+ { 'start': 0, 'name': 'a', 'type': 'tensor' },
+ { 'start': 1, 'name': 'b', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'adj_x',
+ 'name': 'transposeA',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ {
+ 'tfName': 'adj_y',
+ 'name': 'transposeB',
+ 'type': 'bool',
+ 'defaultValue': false
+ },
+ { 'tfName': 'T', 'name': 'dtype', 'type': 'dtype', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'Transpose',
+ 'category': 'matrices',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'perm', 'type': 'number[]' },
+ ],
+ 'attrs': [{
+ 'tfName': 'T',
+ 'name': 'dtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ }]
+ }
+];
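+// A sketch of the matrix ops, assuming the bundled `tf` namespace. The
+// adj_x/adj_y attrs of BatchMatMul map onto the same transpose flags:
+//
+//   const a = tf.tensor2d([[1, 2], [3, 4]]);
+//   const b = tf.tensor2d([[5, 6], [7, 8]]);
+//   tf.matMul(a, b, /* transposeA= */ false, /* transposeB= */ true).print();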
+//# sourceMappingURL=matrices.js.map
+
+/***/ }),
+/* 52 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [
+ {
+ 'tfOpName': 'FusedBatchNorm',
+ 'category': 'normalization',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'scale', 'type': 'tensor' },
+ { 'start': 2, 'name': 'offset', 'type': 'tensor' },
+ { 'start': 3, 'name': 'mean', 'type': 'tensor' },
+ { 'start': 4, 'name': 'variance', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'epsilon',
+ 'name': 'epsilon',
+ 'type': 'number',
+ 'defaultValue': 0.001
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'FusedBatchNormV2',
+ 'category': 'normalization',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'scale', 'type': 'tensor' },
+ { 'start': 2, 'name': 'offset', 'type': 'tensor' },
+ { 'start': 3, 'name': 'mean', 'type': 'tensor' },
+ { 'start': 4, 'name': 'variance', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'epsilon',
+ 'name': 'epsilon',
+ 'type': 'number',
+ 'defaultValue': 0.001
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'FusedBatchNormV3',
+ 'category': 'normalization',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'scale', 'type': 'tensor' },
+ { 'start': 2, 'name': 'offset', 'type': 'tensor' },
+ { 'start': 3, 'name': 'mean', 'type': 'tensor' },
+ { 'start': 4, 'name': 'variance', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'epsilon',
+ 'name': 'epsilon',
+ 'type': 'number',
+ 'defaultValue': 0.001
+ },
+ {
+ 'tfName': 'data_format',
+ 'name': 'dataFormat',
+ 'type': 'string',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'LRN',
+ 'category': 'normalization',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'depth_radius',
+ 'name': 'radius',
+ 'type': 'number',
+ 'defaultValue': 5
+ },
+ { 'tfName': 'bias', 'name': 'bias', 'type': 'number', 'defaultValue': 1.0 },
+ {
+ 'tfName': 'alpha',
+ 'name': 'alpha',
+ 'type': 'number',
+ 'defaultValue': 1.0
+ },
+ {
+ 'tfName': 'beta',
+ 'name': 'beta',
+ 'type': 'number',
+ 'defaultValue': 0.5
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Softmax',
+ 'category': 'normalization',
+ 'inputs': [{ 'start': 0, 'name': 'x', 'type': 'tensor' }]
+ },
+ {
+ 'tfOpName': 'LogSoftmax',
+ 'category': 'normalization',
+ 'inputs': [{ 'start': 0, 'name': 'x', 'type': 'tensor' }]
+ },
+ {
+ 'tfOpName': 'SparseToDense',
+ 'category': 'normalization',
+ 'inputs': [
+ { 'start': 0, 'name': 'sparseIndices', 'type': 'tensor' },
+ { 'start': 1, 'name': 'outputShape', 'type': 'number[]' },
+ { 'start': 2, 'name': 'sparseValues', 'type': 'tensor' },
+ { 'start': 3, 'name': 'defaultValue', 'type': 'tensor' },
+ ],
+ 'attrs': [{
+ 'tfName': 'validate_indices',
+ 'name': 'validateIndices',
+ 'type': 'bool',
+ 'defaultValue': true,
+ 'notSupported': true
+ }]
+ }
+];
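+// A sketch of the normalization ops, assuming the bundled `tf` namespace;
+// FusedBatchNorm{,V2,V3} all land on tf.batchNorm:
+//
+//   const x = tf.tensor2d([[1, 2], [3, 4]]);
+//   const mean = tf.tensor1d([2, 3]);
+//   const variance = tf.tensor1d([1, 1]);
+//   tf.batchNorm(x, mean, variance, undefined, undefined, 0.001).print();
+//   tf.softmax(x).print();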
+//# sourceMappingURL=normalization.js.map
+
+/***/ }),
+/* 53 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [
+ {
+ 'tfOpName': 'Max',
+ 'category': 'reduction',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'axis', 'type': 'number[]' },
+ ],
+ 'attrs': [{ 'tfName': 'keep_dims', 'name': 'keepDims', 'type': 'bool' }]
+ },
+ {
+ 'tfOpName': 'Mean',
+ 'category': 'reduction',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'axis', 'type': 'number[]' },
+ ],
+ 'attrs': [{ 'tfName': 'keep_dims', 'name': 'keepDims', 'type': 'bool' }]
+ },
+ {
+ 'tfOpName': 'Min',
+ 'category': 'reduction',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'axis', 'type': 'number[]' },
+ ],
+ 'attrs': [{ 'tfName': 'keep_dims', 'name': 'keepDims', 'type': 'bool' }]
+ },
+ {
+ 'tfOpName': 'Sum',
+ 'category': 'reduction',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'axis', 'type': 'number[]' },
+ ],
+ 'attrs': [{ 'tfName': 'keep_dims', 'name': 'keepDims', 'type': 'bool' }]
+ },
+ {
+ 'tfOpName': 'All',
+ 'category': 'reduction',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'axis', 'type': 'number[]' },
+ ],
+ 'attrs': [{ 'tfName': 'keep_dims', 'name': 'keepDims', 'type': 'bool' }]
+ },
+ {
+ 'tfOpName': 'Any',
+ 'category': 'reduction',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'axis', 'type': 'number[]' },
+ ],
+ 'attrs': [{ 'tfName': 'keep_dims', 'name': 'keepDims', 'type': 'bool' }]
+ },
+ {
+ 'tfOpName': 'ArgMax',
+ 'category': 'reduction',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'axis', 'type': 'number' }
+ ]
+ },
+ {
+ 'tfOpName': 'ArgMin',
+ 'category': 'reduction',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'axis', 'type': 'number' }
+ ]
+ },
+ {
+ 'tfOpName': 'Prod',
+ 'category': 'reduction',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'axis', 'type': 'number[]' },
+ ],
+ 'attrs': [{ 'tfName': 'keep_dims', 'name': 'keepDims', 'type': 'bool' }]
+ },
+ {
+ 'tfOpName': 'Cumsum',
+ 'category': 'reduction',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'axis', 'type': 'number' },
+ ],
+ 'attrs': [
+ { 'tfName': 'exclusive', 'name': 'exclusive', 'type': 'bool' },
+ { 'tfName': 'reverse', 'name': 'reverse', 'type': 'bool' }
+ ]
+ }
+];
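+// A sketch of the reduction ops, assuming the bundled `tf` namespace; the
+// keep_dims attr surfaces as the keepDims argument:
+//
+//   const x = tf.tensor2d([[1, 2], [3, 4]]);
+//   tf.max(x, /* axis= */ 1, /* keepDims= */ true).print();  // [[2], [4]]
+//   tf.cumsum(tf.tensor1d([1, 2, 3])).print();               // [1, 3, 6]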
+//# sourceMappingURL=reduction.js.map
+
+/***/ }),
+/* 54 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [
+ {
+ 'tfOpName': 'ConcatV2',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 0, 'end': -1, 'name': 'tensors', 'type': 'tensors' },
+ { 'start': -1, 'name': 'axis', 'type': 'number' }
+ ],
+ 'attrs': [{ 'tfName': 'N', 'name': 'n', 'type': 'number', 'defaultValue': 2 }]
+ },
+ {
+ 'tfOpName': 'Concat',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 1, 'end': 0, 'name': 'tensors', 'type': 'tensors' },
+ { 'start': 0, 'name': 'axis', 'type': 'number' }
+ ],
+ 'attrs': [{ 'tfName': 'N', 'name': 'n', 'type': 'number', 'defaultValue': 2 }]
+ },
+ {
+ 'tfOpName': 'GatherV2',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'indices', 'type': 'tensor' },
+ { 'start': 2, 'name': 'axis', 'type': 'number', 'defaultValue': 0 }
+ ]
+ },
+ {
+ 'tfOpName': 'Gather',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'indices', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'axis', 'name': 'axis', 'type': 'number', 'defaultValue': 0 }, {
+ 'tfName': 'validate_indices',
+ 'name': 'validateIndices',
+ 'type': 'bool',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Reverse',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'dims', 'type': 'bool', 'notSupported': true }
+ ]
+ },
+ {
+ 'tfOpName': 'ReverseV2',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'axis', 'type': 'number[]' }
+ ]
+ },
+ {
+ 'tfOpName': 'Slice',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'begin', 'type': 'number[]' },
+ { 'start': 2, 'name': 'size', 'type': 'number[]' }
+ ]
+ },
+ {
+ 'tfOpName': 'StridedSlice',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'begin', 'type': 'number[]' },
+ { 'start': 2, 'name': 'end', 'type': 'number[]' },
+ { 'start': 3, 'name': 'strides', 'type': 'number[]' },
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'begin_mask',
+ 'name': 'beginMask',
+ 'type': 'number',
+ 'defaultValue': 0
+ },
+ {
+ 'tfName': 'end_mask',
+ 'name': 'endMask',
+ 'type': 'number',
+ 'defaultValue': 0
+ },
+ {
+ 'tfName': 'new_axis_mask',
+ 'name': 'newAxisMask',
+ 'type': 'number',
+ 'defaultValue': 0
+ },
+ {
+ 'tfName': 'ellipsis_mask',
+ 'name': 'ellipsisMask',
+ 'type': 'number',
+ 'defaultValue': 0
+ },
+ {
+ 'tfName': 'shrink_axis_mask',
+ 'name': 'shrinkAxisMask',
+ 'type': 'number',
+ 'defaultValue': 0
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Pack',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 0, 'end': 0, 'name': 'tensors', 'type': 'tensors' },
+ ],
+ 'attrs': [
+ { 'tfName': 'axis', 'name': 'axis', 'type': 'number', 'defaultValue': 0 }
+ ]
+ },
+ {
+ 'tfOpName': 'Unpack',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 0, 'name': 'tensor', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'axis', 'name': 'axis', 'type': 'number', 'defaultValue': 0 }, {
+ 'tfName': 'num',
+ 'name': 'num',
+ 'type': 'number',
+ 'defaultValue': 0,
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Tile',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'reps', 'type': 'number[]' }
+ ]
+ },
+ {
+ 'tfOpName': 'Split',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 0, 'name': 'axis', 'type': 'number', 'defaultValue': 0 },
+ { 'start': 1, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [{
+ 'tfName': 'num_split',
+ 'name': 'numOrSizeSplits',
+ 'type': 'number',
+ 'defaultValue': 1
+ }]
+ },
+ {
+ 'tfOpName': 'SplitV',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'numOrSizeSplits', 'type': 'number[]' },
+ { 'start': 2, 'name': 'axis', 'type': 'number', 'defaultValue': 0 }
+ ]
+ },
+ {
+ 'tfOpName': 'ScatterNd',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 0, 'name': 'indices', 'type': 'tensor' },
+ { 'start': 1, 'name': 'values', 'type': 'tensor' },
+ { 'start': 2, 'name': 'shape', 'type': 'number[]' }
+ ]
+ },
+ {
+ 'tfOpName': 'GatherNd',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'indices', 'type': 'tensor' }
+ ]
+ },
+ {
+ 'tfOpName': 'SparseToDense',
+ 'category': 'slice_join',
+ 'inputs': [
+ { 'start': 0, 'name': 'sparseIndices', 'type': 'tensor' },
+ { 'start': 1, 'name': 'outputShape', 'type': 'number[]' },
+ { 'start': 2, 'name': 'sparseValues', 'type': 'tensor' },
+ { 'start': 3, 'name': 'defaultValue', 'type': 'tensor' },
+ ],
+ 'attrs': [{
+ 'tfName': 'validate_indices',
+ 'name': 'validateIndices',
+ 'type': 'bool',
+ 'defaultValue': false,
+ 'notSupported': true
+ }]
+ }
+];
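+// A sketch of the slice/join ops, assuming the bundled `tf` namespace. Note
+// how ConcatV2 takes its axis as the last input, as encoded above:
+//
+//   const a = tf.tensor1d([1, 2]);
+//   const b = tf.tensor1d([3, 4]);
+//   tf.concat([a, b], /* axis= */ 0).print();               // [1, 2, 3, 4]
+//   tf.slice(tf.tensor1d([1, 2, 3, 4]), [1], [2]).print();  // [2, 3]
+//   tf.stack([a, b]).print();                               // [[1, 2], [3, 4]]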
+//# sourceMappingURL=slice_join.js.map
+
+/***/ }),
+/* 55 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [
+ {
+ 'tfOpName': 'FFT',
+ 'category': 'spectral',
+ 'inputs': [{ 'start': 0, 'name': 'x', 'type': 'tensor' }]
+ },
+ {
+ 'tfOpName': 'IFFT',
+ 'category': 'spectral',
+ 'inputs': [{ 'start': 0, 'name': 'x', 'type': 'tensor' }]
+ },
+ {
+ 'tfOpName': 'RFFT',
+ 'category': 'spectral',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' }, {
+ 'start': 1,
+ 'name': 'fft_length',
+ 'type': 'number',
+ 'notSupported': true
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'IRFFT',
+ 'category': 'spectral',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' }, {
+ 'start': 1,
+ 'name': 'fft_length',
+ 'type': 'number',
+ 'notSupported': true
+ }
+ ]
+ }
+];
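+// A sketch of the spectral ops, assuming the bundled `tf` namespace. FFT and
+// IFFT expect complex input, while RFFT takes a real tensor:
+//
+//   const real = tf.tensor1d([1, 0, 0, 0]);
+//   const imag = tf.tensor1d([0, 0, 0, 0]);
+//   tf.spectral.fft(tf.complex(real, imag)).print();
+//   tf.spectral.rfft(real).print();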
+//# sourceMappingURL=spectral.js.map
+
+/***/ }),
+/* 56 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+__webpack_require__.r(__webpack_exports__);
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "json", function() { return json; });
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+const json = [
+ {
+ 'tfOpName': 'Cast',
+ 'category': 'transformation',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ {
+ 'tfName': 'SrcT',
+ 'name': 'sdtype',
+ 'type': 'dtype',
+ 'notSupported': true
+ },
+ { 'tfName': 'DstT', 'name': 'dtype', 'type': 'dtype' }
+ ]
+ },
+ {
+ 'tfOpName': 'ExpandDims',
+ 'category': 'transformation',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'axis', 'type': 'number' }
+ ]
+ },
+ {
+ 'tfOpName': 'Pad',
+ 'category': 'transformation',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'padding', 'type': 'number[]' },
+ ],
+ 'attrs': [{
+ 'tfName': 'constant_value',
+ 'name': 'constantValue',
+ 'type': 'number',
+ 'defaultValue': 0
+ }]
+ },
+ {
+ 'tfOpName': 'PadV2',
+ 'category': 'transformation',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'padding', 'type': 'number[]' }, {
+ 'start': 2,
+ 'name': 'constantValue',
+ 'type': 'number',
+ 'defaultValue': 0
+ }
+ ]
+ },
+ {
+ 'tfOpName': 'Reshape',
+ 'category': 'transformation',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'shape', 'type': 'number[]' }
+ ]
+ },
+ {
+ 'tfOpName': 'Squeeze',
+ 'category': 'transformation',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [{
+ 'tfName': 'axis',
+ 'tfDeprecatedName': 'squeeze_dims',
+ 'name': 'axis',
+ 'type': 'number[]'
+ }]
+ },
+ {
+ 'tfOpName': 'SpaceToBatchND',
+ 'category': 'transformation',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'blockShape', 'type': 'number[]' },
+ { 'start': 2, 'name': 'paddings', 'type': 'number[]' }
+ ]
+ },
+ {
+ 'tfOpName': 'BatchToSpaceND',
+ 'category': 'transformation',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'blockShape', 'type': 'number[]' },
+ { 'start': 2, 'name': 'crops', 'type': 'number[]' }
+ ]
+ },
+ {
+ 'tfOpName': 'DepthToSpace',
+ 'category': 'transformation',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ ],
+ 'attrs': [
+ { 'tfName': 'block_size', 'name': 'blockSize', 'type': 'number' },
+ { 'tfName': 'data_format', 'name': 'dataFormat', 'type': 'string' }
+ ]
+ },
+ {
+ 'tfOpName': 'BroadcastTo',
+ 'category': 'transformation',
+ 'inputs': [
+ { 'start': 0, 'name': 'x', 'type': 'tensor' },
+ { 'start': 1, 'name': 'shape', 'type': 'number[]' },
+ ],
+ 'attrs': []
+ }
+];
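+// A sketch of the transformation ops, assuming the bundled `tf` namespace:
+//
+//   const x = tf.tensor2d([[1, 2], [3, 4]]);
+//   tf.cast(x, 'int32');                                          // 'Cast'
+//   tf.pad(x, [[1, 1], [0, 0]], /* constantValue= */ 0).print();  // 'PadV2'
+//   tf.reshape(x, [4]).print();                                   // 'Reshape'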
+//# sourceMappingURL=transformation.js.map
+
+/***/ }),
+/* 57 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* WEBPACK VAR INJECTION */(function(Buffer) {/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return ByteChunkIterator; });
+/* harmony import */ var _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(0);
+/* harmony import */ var _lazy_iterator__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(14);
+/* harmony import */ var _string_iterator__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(58);
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * =============================================================================
+ */
+
+
+
+class ByteChunkIterator extends _lazy_iterator__WEBPACK_IMPORTED_MODULE_1__[/* LazyIterator */ "a"] {
+ /**
+ * Decode a stream of UTF8-encoded byte arrays to a stream of strings.
+ *
+ * The byte arrays produced from the ByteChunkIterator on which this is
+ * called will be interpreted as concatenated. No assumptions are made about
+ * the boundaries of the incoming chunks, so a multi-byte UTF8 encoding of a
+ * character may span the boundary between chunks. This naturally happens,
+ * for instance, when reading fixed-size byte arrays from a file.
+ */
+ decodeUTF8() {
+ return new Utf8Iterator(this);
+ }
+}
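+// A usage sketch (assumption: `byteChunks` is a ByteChunkIterator over a
+// UTF8-encoded text file, e.g. obtained from one of tf.data's file sources):
+//
+//   const strings = byteChunks.decodeUTF8();  // LazyIterator of strings
+//   const lines = strings.split('\n');        // see StringIterator.split()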
+// ============================================================================
+// The following private classes serve to implement the chainable methods
+// on ByteChunkIterator. Unfortunately they can't be placed in separate files,
+// due to resulting trouble with circular imports.
+// ============================================================================
+// We wanted multiple inheritance, e.g.
+// class Utf8Iterator extends QueueIterator, StringIterator
+// but the TypeScript mixin approach is a bit hacky, so we take this adapter
+// approach instead.
+class Utf8Iterator extends _string_iterator__WEBPACK_IMPORTED_MODULE_2__[/* StringIterator */ "a"] {
+ constructor(upstream) {
+ super();
+ this.upstream = upstream;
+ this.impl = new Utf8IteratorImpl(upstream);
+ }
+ summary() {
+ return this.impl.summary();
+ }
+ async next() {
+ return this.impl.next();
+ }
+}
+/**
+ * Decode a stream of UTF8-encoded byte arrays to a stream of strings.
+ *
+ * This is tricky because the incoming byte array boundaries may disrupt a
+ * multi-byte UTF8 character. Thus any incomplete character data at the end of
+ * a chunk must be carried over and prepended to the next chunk before
+ * decoding. Luckily, the native decoders (TextDecoder in the browser and
+ * string_decoder in Node) handle byte array boundaries automatically.
+ *
+ * In the context of an input pipeline for machine learning, UTF8 decoding is
+ * needed to parse text files containing training examples or prediction
+ * requests (e.g., formatted as CSV or JSON). We cannot use the built-in
+ * decoding provided by FileReader.readAsText() because here we are in a
+ * streaming context, which FileReader does not support.
+ *
+ * @param upstream A `LazyIterator` of `Uint8Arrays` containing UTF8-encoded
+ * text, which should be interpreted as concatenated. No assumptions are
+ * made about the boundaries of the incoming chunks, so a multi-byte UTF8
+ * encoding of a character may span the boundary between chunks. This
+ * naturally happens, for instance, when reading fixed-size byte arrays from a
+ * file.
+ */
+class Utf8IteratorImpl extends _lazy_iterator__WEBPACK_IMPORTED_MODULE_1__[/* OneToManyIterator */ "b"] {
+ constructor(upstream) {
+ super();
+ this.upstream = upstream;
+ if (Object(_tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["env"])().get('IS_BROWSER')) {
+ this.decoder = new TextDecoder('utf-8');
+ }
+ else {
+ // tslint:disable-next-line:no-require-imports
+ const { StringDecoder } = __webpack_require__(78);
+ this.decoder = new StringDecoder('utf8');
+ }
+ }
+ summary() {
+ return `${this.upstream.summary()} -> Utf8`;
+ }
+ async pump() {
+ const chunkResult = await this.upstream.next();
+ let chunk;
+ if (chunkResult.done) {
+ return false;
+ }
+ else {
+ chunk = chunkResult.value;
+ }
+ let text;
+ if (Object(_tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["env"])().get('IS_BROWSER')) {
+ text = this.decoder.decode(chunk, { stream: true });
+ }
+ else {
+ text = this.decoder.write(Buffer.from(chunk.buffer));
+ }
+ this.outputQueue.push(text);
+ return true;
+ }
+}
+//# sourceMappingURL=byte_chunk_iterator.js.map
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(39).Buffer))
+
+/***/ }),
+/* 58 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return StringIterator; });
+/* harmony import */ var _lazy_iterator__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(14);
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * =============================================================================
+ */
+
+class StringIterator extends _lazy_iterator__WEBPACK_IMPORTED_MODULE_0__[/* LazyIterator */ "a"] {
+ /**
+ * Splits a string stream on a given separator.
+ *
+ * It is assumed that the incoming chunk boundaries have no semantic meaning,
+ * so conceptually the incoming stream is treated simply as the concatenation
+ * of its elements.
+ *
+ * The outgoing stream provides chunks corresponding to the results of the
+ * standard string split() operation (even if such a chunk spanned incoming
+ * chunks). The separators are not included.
+ *
+ * A typical usage is to split a text file (represented as a stream with
+ * arbitrary chunk boundaries) into lines.
+ *
+ * @param upstream A readable stream of strings that can be treated as
+ * concatenated.
+ * @param separator A character to split on.
+ */
+ split(separator) {
+ return new SplitIterator(this, separator);
+ }
+}
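+// A usage sketch (assumption: `strings` is a StringIterator whose chunks
+// arrive with arbitrary boundaries, e.g. ['ab\ncd', 'ef\n']):
+//
+//   const lines = strings.split('\n');
+//   // yields 'ab', then 'cdef' -- the 'cd'/'ef' pieces are rejoined.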
+// ============================================================================
+// The following private classes serve to implement the chainable methods
+// on StringIterator. Unfortunately they can't be placed in separate files, due
+// to resulting trouble with circular imports.
+// ============================================================================
+// We wanted multiple inheritance, e.g.
+// class SplitIterator extends QueueIterator, StringIterator
+// but the TypeScript mixin approach is a bit hacky, so we take this adapter
+// approach instead.
+class SplitIterator extends StringIterator {
+ constructor(upstream, separator) {
+ super();
+ this.upstream = upstream;
+ this.impl = new SplitIteratorImpl(upstream, separator);
+ }
+ summary() {
+ return this.impl.summary();
+ }
+ async next() {
+ return this.impl.next();
+ }
+}
+class SplitIteratorImpl extends _lazy_iterator__WEBPACK_IMPORTED_MODULE_0__[/* OneToManyIterator */ "b"] {
+ constructor(upstream, separator) {
+ super();
+ this.upstream = upstream;
+ this.separator = separator;
+ // A partial string at the end of an upstream chunk
+ this.carryover = '';
+ }
+ summary() {
+ return `${this.upstream.summary()} -> Split('${this.separator}')`;
+ }
+ async pump() {
+ const chunkResult = await this.upstream.next();
+ if (chunkResult.done) {
+ if (this.carryover === '') {
+ return false;
+ }
+ // Pretend that the pump succeeded in order to emit the small last batch.
+ // The next pump() call will actually fail.
+ this.outputQueue.push(this.carryover);
+ this.carryover = '';
+ return true;
+ }
+ const lines = chunkResult.value.split(this.separator);
+ // Note the behavior: " ab ".split(' ') === ['', 'ab', '']
+ // Thus the carryover may be '' if the separator falls on a chunk
+ // boundary; this produces the correct result.
+ lines[0] = this.carryover + lines[0];
+ for (const line of lines.slice(0, -1)) {
+ this.outputQueue.push(line);
+ }
+ this.carryover = lines[lines.length - 1];
+ return true;
+ }
+}
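+// Worked example of the carryover logic above: for chunks ['ab\ncd', 'ef\n']
+// split on '\n', the first pump() emits 'ab' and carries 'cd'; the second
+// emits 'cdef' and carries ''; the final pump() (upstream done, empty
+// carryover) returns false.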
+//# sourceMappingURL=string_iterator.js.map
+
+/***/ }),
+/* 59 */
+/***/ (function(module, exports, __webpack_require__) {
+
+"use strict";
+
+Object.defineProperty(exports, "__esModule", { value: true });
+const blazeface = __webpack_require__(81);
+const tfconv = __webpack_require__(38);
+const tf = __webpack_require__(0);
+const keypoints_1 = __webpack_require__(82);
+const pipeline_1 = __webpack_require__(83);
+const uv_coords_1 = __webpack_require__(85);
+const FACEMESH_GRAPHMODEL_PATH = 'https://tfhub.dev/mediapipe/tfjs-model/facemesh/1/default/1';
+const MESH_MODEL_INPUT_WIDTH = 192;
+const MESH_MODEL_INPUT_HEIGHT = 192;
+async function load({ maxContinuousChecks = 5, detectionConfidence = 0.9, maxFaces = 10, iouThreshold = 0.3, scoreThreshold = 0.75 } = {}) {
+ const [blazeFace, blazeMeshModel] = await Promise.all([
+ loadDetectorModel(maxFaces, iouThreshold, scoreThreshold), loadMeshModel()
+ ]);
+ const faceMesh = new FaceMesh(blazeFace, blazeMeshModel, maxContinuousChecks, detectionConfidence, maxFaces);
+ return faceMesh;
+}
+exports.load = load;
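+// A usage sketch (assumption: `video` is an HTMLVideoElement showing a face):
+//
+//   const model = await load({ maxFaces: 1 });
+//   const faces = await model.estimateFaces(video);
+//   if (faces.length > 0) console.log(faces[0].scaledMesh);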
+async function loadDetectorModel(maxFaces, iouThreshold, scoreThreshold) {
+ return blazeface.load({ maxFaces, iouThreshold, scoreThreshold });
+}
+async function loadMeshModel() {
+ return tfconv.loadGraphModel(FACEMESH_GRAPHMODEL_PATH, { fromTFHub: true });
+}
+function getInputTensorDimensions(input) {
+ return input instanceof tf.Tensor ? [input.shape[0], input.shape[1]] :
+ [input.height, input.width];
+}
+function flipFaceHorizontal(face, imageWidth) {
+ if (face.mesh instanceof tf.Tensor) {
+ const [topLeft, bottomRight, mesh, scaledMesh] = tf.tidy(() => {
+ const subtractBasis = tf.tensor1d([imageWidth - 1, 0, 0]);
+ const multiplyBasis = tf.tensor1d([1, -1, 1]);
+ return tf.tidy(() => {
+ return [
+ tf.concat([
+ tf.sub(imageWidth - 1, face.boundingBox.topLeft.slice(0, 1)),
+ face.boundingBox.topLeft.slice(1, 1)
+ ]),
+ tf.concat([
+ tf.sub(imageWidth - 1, face.boundingBox.bottomRight.slice(0, 1)),
+ face.boundingBox.bottomRight.slice(1, 1)
+ ]),
+ tf.sub(subtractBasis, face.mesh).mul(multiplyBasis),
+ tf.sub(subtractBasis, face.scaledMesh).mul(multiplyBasis)
+ ];
+ });
+ });
+ return Object.assign({}, face, { boundingBox: { topLeft, bottomRight }, mesh, scaledMesh });
+ }
+ return Object.assign({}, face, {
+ boundingBox: {
+ topLeft: [
+ imageWidth - 1 - face.boundingBox.topLeft[0],
+ face.boundingBox.topLeft[1]
+ ],
+ bottomRight: [
+ imageWidth - 1 - face.boundingBox.bottomRight[0],
+ face.boundingBox.bottomRight[1]
+ ]
+ },
+ mesh: (face.mesh).map(coord => {
+ const flippedCoord = coord.slice(0);
+ flippedCoord[0] = imageWidth - 1 - coord[0];
+ return flippedCoord;
+ }),
+ scaledMesh: face.scaledMesh.map(coord => {
+ const flippedCoord = coord.slice(0);
+ flippedCoord[0] = imageWidth - 1 - coord[0];
+ return flippedCoord;
+ })
+ });
+}
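+// Worked example of the flip above: with imageWidth = 100, an x coordinate of
+// 10 maps to 100 - 1 - 10 = 89. In the tensor path, subtracting from
+// [imageWidth - 1, 0, 0] also negates y, which the [1, -1, 1] multiply basis
+// restores.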
+class FaceMesh {
+ constructor(blazeFace, blazeMeshModel, maxContinuousChecks, detectionConfidence, maxFaces) {
+ this.pipeline = new pipeline_1.Pipeline(blazeFace, blazeMeshModel, MESH_MODEL_INPUT_WIDTH, MESH_MODEL_INPUT_HEIGHT, maxContinuousChecks, maxFaces);
+ this.detectionConfidence = detectionConfidence;
+ }
+ static getAnnotations() {
+ return keypoints_1.MESH_ANNOTATIONS;
+ }
+ static getUVCoords() {
+ return uv_coords_1.UV_COORDS;
+ }
+ async estimateFaces(input, returnTensors = false, flipHorizontal = false) {
+ const [, width] = getInputTensorDimensions(input);
+ const image = tf.tidy(() => {
+ if (!(input instanceof tf.Tensor)) {
+ input = tf.browser.fromPixels(input);
+ }
+ return input.toFloat().expandDims(0);
+ });
+ const savedWebglPackDepthwiseConvFlag = tf.env().get('WEBGL_PACK_DEPTHWISECONV');
+ tf.env().set('WEBGL_PACK_DEPTHWISECONV', true);
+ const predictions = await this.pipeline.predict(image);
+ tf.env().set('WEBGL_PACK_DEPTHWISECONV', savedWebglPackDepthwiseConvFlag);
+ image.dispose();
+ if (predictions != null && predictions.length > 0) {
+ return Promise.all(predictions.map(async (prediction, i) => {
+ const { coords, scaledCoords, box, flag } = prediction;
+ let tensorsToRead = [flag];
+ if (!returnTensors) {
+ tensorsToRead = tensorsToRead.concat([coords, scaledCoords, box.startPoint, box.endPoint]);
+ }
+ const tensorValues = await Promise.all(tensorsToRead.map(async (d) => d.array()));
+ const flagValue = tensorValues[0];
+ flag.dispose();
+ if (flagValue < this.detectionConfidence) {
+ this.pipeline.clearRegionOfInterest(i);
+ }
+ if (returnTensors) {
+ const annotatedPrediction = {
+ faceInViewConfidence: flagValue,
+ mesh: coords,
+ scaledMesh: scaledCoords,
+ boundingBox: {
+ topLeft: box.startPoint.squeeze(),
+ bottomRight: box.endPoint.squeeze()
+ }
+ };
+ if (flipHorizontal) {
+ return flipFaceHorizontal(annotatedPrediction, width);
+ }
+ return annotatedPrediction;
+ }
+ const [coordsArr, coordsArrScaled, topLeft, bottomRight] = tensorValues.slice(1);
+ scaledCoords.dispose();
+ coords.dispose();
+ let annotatedPrediction = {
+ faceInViewConfidence: flagValue,
+ boundingBox: { topLeft, bottomRight },
+ mesh: coordsArr,
+ scaledMesh: coordsArrScaled
+ };
+ if (flipHorizontal) {
+ annotatedPrediction =
+ flipFaceHorizontal(annotatedPrediction, width);
+ }
+ const annotations = {};
+ for (const key in keypoints_1.MESH_ANNOTATIONS) {
+ annotations[key] = keypoints_1.MESH_ANNOTATIONS[key].map(index => annotatedPrediction.scaledMesh[index]);
+ }
+ annotatedPrediction['annotations'] = annotations;
+ return annotatedPrediction;
+ }));
+ }
+ return [];
+ }
+}
+exports.FaceMesh = FaceMesh;
+//# sourceMappingURL=index.js.map
+
+/***/ }),
+/* 60 */
+/***/ (function(module, exports, __webpack_require__) {
+
+"use strict";
+/**
+ * @license
+ * Copyright 2020 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+Object.defineProperty(exports, '__esModule', { value: true });
+
+var tfjsCore = __webpack_require__(0);
+var tfjsLayers = __webpack_require__(87);
+var tfjsConverter = __webpack_require__(38);
+var tfjsData = __webpack_require__(88);
+var tfjsBackendCpu = __webpack_require__(89);
+var tfjsBackendWebgl = __webpack_require__(86);
+
+/** @license See the LICENSE file. */
+// This code is auto-generated, do not modify this file!
+var version = '2.0.1';
+
+/**
+ * @license
+ * Copyright 2018 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+var version$1 = {
+ 'tfjs-core': tfjsCore.version_core,
+ 'tfjs-backend-cpu': tfjsBackendCpu.version_cpu,
+ 'tfjs-backend-webgl': tfjsBackendWebgl.version_webgl,
+ 'tfjs-data': tfjsData.version_data,
+ 'tfjs-layers': tfjsLayers.version_layers,
+ 'tfjs-converter': tfjsConverter.version_converter,
+ 'tfjs': version
+};
+
+Object.keys(tfjsCore).forEach(function (k) {
+ if (k !== 'default') Object.defineProperty(exports, k, {
+ enumerable: true,
+ get: function () {
+ return tfjsCore[k];
+ }
+ });
+});
+Object.keys(tfjsLayers).forEach(function (k) {
+ if (k !== 'default') Object.defineProperty(exports, k, {
+ enumerable: true,
+ get: function () {
+ return tfjsLayers[k];
+ }
+ });
+});
+Object.keys(tfjsConverter).forEach(function (k) {
+ if (k !== 'default') Object.defineProperty(exports, k, {
+ enumerable: true,
+ get: function () {
+ return tfjsConverter[k];
+ }
+ });
+});
+exports.data = tfjsData;
+exports.version = version$1;
+//# sourceMappingURL=tf.node.js.map
+
+
+/***/ }),
+/* 61 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* WEBPACK VAR INJECTION */(function(process) {/* harmony import */ var _device_util__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(36);
+/* harmony import */ var _environment__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(10);
+/**
+ * @license
+ * Copyright 2019 Google Inc. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+
+const ENV = Object(_environment__WEBPACK_IMPORTED_MODULE_1__[/* env */ "c"])();
+/**
+ * This file contains environment-related flag registrations.
+ */
+/** Whether to enable debug mode. */
+ENV.registerFlag('DEBUG', () => false, debugValue => {
+ if (debugValue) {
+ console.warn('Debugging mode is ON. The output of every math call will ' +
+ 'be downloaded to CPU and checked for NaNs. ' +
+ 'This significantly impacts performance.');
+ }
+});
+/** Whether we are in a browser (as opposed to, say, Node.js) environment. */
+ENV.registerFlag('IS_BROWSER', () => _device_util__WEBPACK_IMPORTED_MODULE_0__["isBrowser"]());
+/** Whether we are in a Node.js (as opposed to browser) environment. */
+ENV.registerFlag('IS_NODE', () => (typeof process !== 'undefined') &&
+ (typeof process.versions !== 'undefined') &&
+ (typeof process.versions.node !== 'undefined'));
+/** Whether this browser is Chrome. */
+ENV.registerFlag('IS_CHROME', () => typeof navigator !== 'undefined' && navigator != null &&
+ navigator.userAgent != null && /Chrome/.test(navigator.userAgent) &&
+ /Google Inc/.test(navigator.vendor));
+/**
+ * True when the environment is "production" where we disable safety checks
+ * to gain performance.
+ */
+ENV.registerFlag('PROD', () => false);
+/**
+ * Whether to do sanity checks when inferring a shape from user-provided
+ * values, used when creating a new tensor.
+ */
+ENV.registerFlag('TENSORLIKE_CHECK_SHAPE_CONSISTENCY', () => ENV.getBool('DEBUG'));
+/** Whether deprecation warnings are enabled. */
+ENV.registerFlag('DEPRECATION_WARNINGS_ENABLED', () => true);
+/** True if running unit tests. */
+ENV.registerFlag('IS_TEST', () => false);
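+// The registrations above share one pattern: a flag name, a thunk computing
+// the default value, and an optional hook run when the flag is assigned. A
+// sketch of reading and overriding flags (the `environment` parameter is an
+// illustrative stand-in for the object returned by env(); not invoked here):
+function exampleFlagUsage(environment) {
+ var isNode = environment.getBool('IS_NODE'); // evaluates the default thunk once, then caches
+ environment.set('DEBUG', true); // runs the console.warn hook registered above
+ return isNode;
+}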
+//# sourceMappingURL=flags.js.map
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(35)))
+
+/***/ }),
+/* 62 */
+/***/ (function(module, __webpack_exports__, __webpack_require__) {
+
+"use strict";
+/* WEBPACK VAR INJECTION */(function(process) {/* unused harmony export getNodeFetch */
+/* unused harmony export resetSystemFetch */
+/* unused harmony export setSystemFetch */
+/* unused harmony export getSystemFetch */
+/* unused harmony export PlatformNode */
+/* harmony import */ var _environment__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(10);
+/**
+ * @license
+ * Copyright 2019 Google LLC. All Rights Reserved.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * =============================================================================
+ */
+
+// We are wrapping this within an object so it can be stubbed by Jasmine.
+const getNodeFetch = {
+ // tslint:disable-next-line:no-require-imports
+ importFetch: () => __webpack_require__(63)
+};
+let systemFetch;
+// These getters and setters are for testing so we don't export a mutable
+// variable.
+function resetSystemFetch() {
+ systemFetch = null;
+}
+function setSystemFetch(fetchFn) {
+ systemFetch = fetchFn;
+}
+function getSystemFetch() {
+ return systemFetch;
+}
+class PlatformNode {
+ constructor() {
+ // tslint:disable-next-line:no-require-imports
+ this.util = __webpack_require__(64);
+ // According to the spec, the built-in encoder can do only UTF-8 encoding.
+ // https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/TextEncoder
+ this.textEncoder = new this.util.TextEncoder();
+ }
+ fetch(path, requestInits) {
+ if (Object(_environment__WEBPACK_IMPORTED_MODULE_0__[/* env */ "c"])().global.fetch != null) {
+ return Object(_environment__WEBPACK_IMPORTED_MODULE_0__[/* env */ "c"])().global.fetch(path, requestInits);
+ }
+ if (systemFetch == null) {
+ systemFetch = getNodeFetch.importFetch();
+ }
+ return systemFetch(path, requestInits);
+ }
+ now() {
+ const time = process.hrtime();
+ return time[0] * 1000 + time[1] / 1000000;
+ }
+ encode(text, encoding) {
+ if (encoding !== 'utf-8' && encoding !== 'utf8') {
+ throw new Error(`Node built-in encoder only supports utf-8, but got ${encoding}`);
+ }
+ return this.textEncoder.encode(text);
+ }
+ decode(bytes, encoding) {
+ if (bytes.length === 0) {
+ return '';
+ }
+ return new this.util.TextDecoder(encoding).decode(bytes);
+ }
+}
+if (Object(_environment__WEBPACK_IMPORTED_MODULE_0__[/* env */ "c"])().get('IS_NODE')) {
+ Object(_environment__WEBPACK_IMPORTED_MODULE_0__[/* env */ "c"])().setPlatform('node', new PlatformNode());
+}
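+// Round-trip sketch for the platform above: encode() delegates to Node's
+// TextEncoder (UTF-8 only, as checked above) and decode() reverses it. A
+// sketch only, assuming the util shim resolves at runtime; not invoked here:
+function encodeDecodeRoundTrip() {
+ var platform = new PlatformNode();
+ var bytes = platform.encode('hello', 'utf-8'); // Uint8Array of UTF-8 bytes
+ return platform.decode(bytes, 'utf-8'); // 'hello'
+}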
+//# sourceMappingURL=platform_node.js.map
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(35)))
+
+/***/ }),
+/* 63 */
+/***/ (function(module, exports) {
+
+/* (ignored) */
+
+/***/ }),
+/* 64 */
+/***/ (function(module, exports) {
+
+/* (ignored) */
+
+/***/ }),
+/* 65 */
+/***/ (function(module, exports, __webpack_require__) {
+
+"use strict";
+
+
+exports.byteLength = byteLength
+exports.toByteArray = toByteArray
+exports.fromByteArray = fromByteArray
+
+var lookup = []
+var revLookup = []
+var Arr = typeof Uint8Array !== 'undefined' ? Uint8Array : Array
+
+var code = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
+for (var i = 0, len = code.length; i < len; ++i) {
+ lookup[i] = code[i]
+ revLookup[code.charCodeAt(i)] = i
+}
+
+// Support decoding URL-safe base64 strings, as Node.js does.
+// See: https://en.wikipedia.org/wiki/Base64#URL_applications
+revLookup['-'.charCodeAt(0)] = 62
+revLookup['_'.charCodeAt(0)] = 63
+
+function getLens (b64) {
+ var len = b64.length
+
+ if (len % 4 > 0) {
+ throw new Error('Invalid string. Length must be a multiple of 4')
+ }
+
+ // Trim off extra bytes after placeholder bytes are found
+ // See: https://github.com/beatgammit/base64-js/issues/42
+ var validLen = b64.indexOf('=')
+ if (validLen === -1) validLen = len
+
+ var placeHoldersLen = validLen === len
+ ? 0
+ : 4 - (validLen % 4)
+
+ return [validLen, placeHoldersLen]
+}
+
+// A base64 string decodes to 3/4 of its length in bytes, minus one byte per '=' placeholder
+function byteLength (b64) {
+ var lens = getLens(b64)
+ var validLen = lens[0]
+ var placeHoldersLen = lens[1]
+ return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen
+}
+
+function _byteLength (b64, validLen, placeHoldersLen) {
+ return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen
+}
+
+function toByteArray (b64) {
+ var tmp
+ var lens = getLens(b64)
+ var validLen = lens[0]
+ var placeHoldersLen = lens[1]
+
+ var arr = new Arr(_byteLength(b64, validLen, placeHoldersLen))
+
+ var curByte = 0
+
+ // if there are placeholders, only get up to the last complete 4 chars
+ var len = placeHoldersLen > 0
+ ? validLen - 4
+ : validLen
+
+ var i
+ for (i = 0; i < len; i += 4) {
+ tmp =
+ (revLookup[b64.charCodeAt(i)] << 18) |
+ (revLookup[b64.charCodeAt(i + 1)] << 12) |
+ (revLookup[b64.charCodeAt(i + 2)] << 6) |
+ revLookup[b64.charCodeAt(i + 3)]
+ arr[curByte++] = (tmp >> 16) & 0xFF
+ arr[curByte++] = (tmp >> 8) & 0xFF
+ arr[curByte++] = tmp & 0xFF
+ }
+
+ if (placeHoldersLen === 2) {
+ tmp =
+ (revLookup[b64.charCodeAt(i)] << 2) |
+ (revLookup[b64.charCodeAt(i + 1)] >> 4)
+ arr[curByte++] = tmp & 0xFF
+ }
+
+ if (placeHoldersLen === 1) {
+ tmp =
+ (revLookup[b64.charCodeAt(i)] << 10) |
+ (revLookup[b64.charCodeAt(i + 1)] << 4) |
+ (revLookup[b64.charCodeAt(i + 2)] >> 2)
+ arr[curByte++] = (tmp >> 8) & 0xFF
+ arr[curByte++] = tmp & 0xFF
+ }
+
+ return arr
+}
+
+function tripletToBase64 (num) {
+ return lookup[num >> 18 & 0x3F] +
+ lookup[num >> 12 & 0x3F] +
+ lookup[num >> 6 & 0x3F] +
+ lookup[num & 0x3F]
+}
+
+function encodeChunk (uint8, start, end) {
+ var tmp
+ var output = []
+ for (var i = start; i < end; i += 3) {
+ tmp =
+ ((uint8[i] << 16) & 0xFF0000) +
+ ((uint8[i + 1] << 8) & 0xFF00) +
+ (uint8[i + 2] & 0xFF)
+ output.push(tripletToBase64(tmp))
+ }
+ return output.join('')
+}
+
+function fromByteArray (uint8) {
+ var tmp
+ var len = uint8.length
+ var extraBytes = len % 3 // if we have 1 byte left, pad 2 bytes
+ var parts = []
+ var maxChunkLength = 16383 // must be multiple of 3
+
+ // go through the array every three bytes, we'll deal with trailing stuff later
+ for (var i = 0, len2 = len - extraBytes; i < len2; i += maxChunkLength) {
+ parts.push(encodeChunk(
+ uint8, i, (i + maxChunkLength) > len2 ? len2 : (i + maxChunkLength)
+ ))
+ }
+
+ // pad the end with zeros, but make sure to not forget the extra bytes
+ if (extraBytes === 1) {
+ tmp = uint8[len - 1]
+ parts.push(
+ lookup[tmp >> 2] +
+ lookup[(tmp << 4) & 0x3F] +
+ '=='
+ )
+ } else if (extraBytes === 2) {
+ tmp = (uint8[len - 2] << 8) + uint8[len - 1]
+ parts.push(
+ lookup[tmp >> 10] +
+ lookup[(tmp >> 4) & 0x3F] +
+ lookup[(tmp << 2) & 0x3F] +
+ '='
+ )
+ }
+
+ return parts.join('')
+}
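+// Round-trip sketch for the three exports above (not invoked here): 3 input
+// bytes become 4 base64 chars, and '='/'==' padding marks 2- or 1-byte tails.
+function base64RoundTrip () {
+ var bytes = new Uint8Array([104, 105, 33]) // the bytes of "hi!"
+ var b64 = fromByteArray(bytes) // "aGkh"
+ var back = toByteArray(b64) // Uint8Array [104, 105, 33]
+ return byteLength(b64) === back.length // true
+}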
+
+
+/***/ }),
+/* 66 */
+/***/ (function(module, exports) {
+
+exports.read = function (buffer, offset, isLE, mLen, nBytes) {
+ var e, m
+ var eLen = (nBytes * 8) - mLen - 1
+ var eMax = (1 << eLen) - 1
+ var eBias = eMax >> 1
+ var nBits = -7
+ var i = isLE ? (nBytes - 1) : 0
+ var d = isLE ? -1 : 1
+ var s = buffer[offset + i]
+
+ i += d
+
+ e = s & ((1 << (-nBits)) - 1)
+ s >>= (-nBits)
+ nBits += eLen
+ for (; nBits > 0; e = (e * 256) + buffer[offset + i], i += d, nBits -= 8) {}
+
+ m = e & ((1 << (-nBits)) - 1)
+ e >>= (-nBits)
+ nBits += mLen
+ for (; nBits > 0; m = (m * 256) + buffer[offset + i], i += d, nBits -= 8) {}
+
+ if (e === 0) {
+ e = 1 - eBias
+ } else if (e === eMax) {
+ return m ? NaN : ((s ? -1 : 1) * Infinity)
+ } else {
+ m = m + Math.pow(2, mLen)
+ e = e - eBias
+ }
+ return (s ? -1 : 1) * m * Math.pow(2, e - mLen)
+}
+
+exports.write = function (buffer, value, offset, isLE, mLen, nBytes) {
+ var e, m, c
+ var eLen = (nBytes * 8) - mLen - 1
+ var eMax = (1 << eLen) - 1
+ var eBias = eMax >> 1
+ var rt = (mLen === 23 ? Math.pow(2, -24) - Math.pow(2, -77) : 0)
+ var i = isLE ? 0 : (nBytes - 1)
+ var d = isLE ? 1 : -1
+ var s = value < 0 || (value === 0 && 1 / value < 0) ? 1 : 0
+
+ value = Math.abs(value)
+
+ if (isNaN(value) || value === Infinity) {
+ m = isNaN(value) ? 1 : 0
+ e = eMax
+ } else {
+ e = Math.floor(Math.log(value) / Math.LN2)
+ if (value * (c = Math.pow(2, -e)) < 1) {
+ e--
+ c *= 2
+ }
+ if (e + eBias >= 1) {
+ value += rt / c
+ } else {
+ value += rt * Math.pow(2, 1 - eBias)
+ }
+ if (value * c >= 2) {
+ e++
+ c /= 2
+ }
+
+ if (e + eBias >= eMax) {
+ m = 0
+ e = eMax
+ } else if (e + eBias >= 1) {
+ m = ((value * c) - 1) * Math.pow(2, mLen)
+ e = e + eBias
+ } else {
+ m = value * Math.pow(2, eBias - 1) * Math.pow(2, mLen)
+ e = 0
+ }
+ }
+
+ for (; mLen >= 8; buffer[offset + i] = m & 0xff, i += d, m /= 256, mLen -= 8) {}
+
+ e = (e << mLen) | m
+ eLen += mLen
+ for (; eLen > 0; buffer[offset + i] = e & 0xff, i += d, e /= 256, eLen -= 8) {}
+
+ buffer[offset + i - d] |= s * 128
+}
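+// Sketch of the write/read pair above for one little-endian float32
+// (mLen = 23 mantissa bits, nBytes = 4), the layout Buffer#writeFloatLE uses.
+// Not invoked here:
+function float32RoundTrip () {
+ var buf = new Uint8Array(4)
+ exports.write(buf, 1.5, 0, true, 23, 4) // encode 1.5 at offset 0
+ return exports.read(buf, 0, true, 23, 4) // 1.5 (exactly representable)
+}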
+
+
+/***/ }),
+/* 67 */
+/***/ (function(module, exports) {
+
+var toString = {}.toString;
+
+module.exports = Array.isArray || function (arr) {
+ return toString.call(arr) == '[object Array]';
+};
+
+
+/***/ }),
+/* 68 */
+/***/ (function(module, exports, __webpack_require__) {
+
+/* WEBPACK VAR INJECTION */(function(module) {var __WEBPACK_AMD_DEFINE_RESULT__;// A port of an algorithm by Johannes Baagøe, 2010
+// http://baagoe.com/en/RandomMusings/javascript/
+// https://github.com/nquinlan/better-random-numbers-for-javascript-mirror
+// Original work is under MIT license -
+
+// Copyright (C) 2010 by Johannes Baagøe
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+
+
+(function(global, module, define) {
+
+function Alea(seed) {
+ var me = this, mash = Mash();
+
+ me.next = function() {
+ var t = 2091639 * me.s0 + me.c * 2.3283064365386963e-10; // 2^-32
+ me.s0 = me.s1;
+ me.s1 = me.s2;
+ return me.s2 = t - (me.c = t | 0);
+ };
+
+ // Apply the seeding algorithm from Baagoe.
+ me.c = 1;
+ me.s0 = mash(' ');
+ me.s1 = mash(' ');
+ me.s2 = mash(' ');
+ me.s0 -= mash(seed);
+ if (me.s0 < 0) { me.s0 += 1; }
+ me.s1 -= mash(seed);
+ if (me.s1 < 0) { me.s1 += 1; }
+ me.s2 -= mash(seed);
+ if (me.s2 < 0) { me.s2 += 1; }
+ mash = null;
+}
+
+function copy(f, t) {
+ t.c = f.c;
+ t.s0 = f.s0;
+ t.s1 = f.s1;
+ t.s2 = f.s2;
+ return t;
+}
+
+function impl(seed, opts) {
+ var xg = new Alea(seed),
+ state = opts && opts.state,
+ prng = xg.next;
+ prng.int32 = function() { return (xg.next() * 0x100000000) | 0; }
+ prng.double = function() {
+ return prng() + (prng() * 0x200000 | 0) * 1.1102230246251565e-16; // 2^-53
+ };
+ prng.quick = prng;
+ if (state) {
+ if (typeof(state) == 'object') copy(state, xg);
+ prng.state = function() { return copy(xg, {}); }
+ }
+ return prng;
+}
+
+function Mash() {
+ var n = 0xefc8249d;
+
+ var mash = function(data) {
+ data = data.toString();
+ for (var i = 0; i < data.length; i++) {
+ n += data.charCodeAt(i);
+ var h = 0.02519603282416938 * n;
+ n = h >>> 0;
+ h -= n;
+ h *= n;
+ n = h >>> 0;
+ h -= n;
+ n += h * 0x100000000; // 2^32
+ }
+ return (n >>> 0) * 2.3283064365386963e-10; // 2^-32
+ };
+
+ return mash;
+}
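+// Usage sketch for impl() above, which builds the public interface shared by
+// every seedrandom sub-generator in this bundle (alea here; xor128, xorwow,
+// xorshift7, xor4096 and tychei follow the same shape): a prng returning
+// floats in [0, 1) with .int32(), .double(), .quick(), and optional state
+// capture via opts.state. Not invoked here:
+function aleaStateSketch() {
+ var rng = impl('my-seed', { state: true }); // opts.state enables .state()
+ rng(); // advance once
+ var resumed = impl('', { state: rng.state() }); // restore from the snapshot
+ return rng() === resumed(); // identical continuation
+}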
+
+
+if (module && module.exports) {
+ module.exports = impl;
+} else if (__webpack_require__(16) && __webpack_require__(29)) {
+ !(__WEBPACK_AMD_DEFINE_RESULT__ = (function() { return impl; }).call(exports, __webpack_require__, exports, module),
+ __WEBPACK_AMD_DEFINE_RESULT__ !== undefined && (module.exports = __WEBPACK_AMD_DEFINE_RESULT__));
+} else {
+ this.alea = impl;
+}
+
+})(
+ this,
+ true && module, // present in node.js
+ __webpack_require__(16) // present with an AMD loader
+);
+
+
+
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(28)(module)))
+
+/***/ }),
+/* 69 */
+/***/ (function(module, exports, __webpack_require__) {
+
+/* WEBPACK VAR INJECTION */(function(module) {var __WEBPACK_AMD_DEFINE_RESULT__;// A Javascript implementation of the "xor128" prng algorithm by
+// George Marsaglia. See http://www.jstatsoft.org/v08/i14/paper
+
+(function(global, module, define) {
+
+function XorGen(seed) {
+ var me = this, strseed = '';
+
+ me.x = 0;
+ me.y = 0;
+ me.z = 0;
+ me.w = 0;
+
+ // Set up generator function.
+ me.next = function() {
+ var t = me.x ^ (me.x << 11);
+ me.x = me.y;
+ me.y = me.z;
+ me.z = me.w;
+ return me.w ^= (me.w >>> 19) ^ t ^ (t >>> 8);
+ };
+
+ if (seed === (seed | 0)) {
+ // Integer seed.
+ me.x = seed;
+ } else {
+ // String seed.
+ strseed += seed;
+ }
+
+ // Mix in string seed, then discard an initial batch of 64 values.
+ for (var k = 0; k < strseed.length + 64; k++) {
+ me.x ^= strseed.charCodeAt(k) | 0;
+ me.next();
+ }
+}
+
+function copy(f, t) {
+ t.x = f.x;
+ t.y = f.y;
+ t.z = f.z;
+ t.w = f.w;
+ return t;
+}
+
+function impl(seed, opts) {
+ var xg = new XorGen(seed),
+ state = opts && opts.state,
+ prng = function() { return (xg.next() >>> 0) / 0x100000000; };
+ prng.double = function() {
+ do {
+ var top = xg.next() >>> 11,
+ bot = (xg.next() >>> 0) / 0x100000000,
+ result = (top + bot) / (1 << 21);
+ } while (result === 0);
+ return result;
+ };
+ prng.int32 = xg.next;
+ prng.quick = prng;
+ if (state) {
+ if (typeof(state) == 'object') copy(state, xg);
+ prng.state = function() { return copy(xg, {}); }
+ }
+ return prng;
+}
+
+if (module && module.exports) {
+ module.exports = impl;
+} else if (__webpack_require__(16) && __webpack_require__(29)) {
+ !(__WEBPACK_AMD_DEFINE_RESULT__ = (function() { return impl; }).call(exports, __webpack_require__, exports, module),
+ __WEBPACK_AMD_DEFINE_RESULT__ !== undefined && (module.exports = __WEBPACK_AMD_DEFINE_RESULT__));
+} else {
+ this.xor128 = impl;
+}
+
+})(
+ this,
+ true && module, // present in node.js
+ __webpack_require__(16) // present with an AMD loader
+);
+
+
+
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(28)(module)))
+
+/***/ }),
+/* 70 */
+/***/ (function(module, exports, __webpack_require__) {
+
+/* WEBPACK VAR INJECTION */(function(module) {var __WEBPACK_AMD_DEFINE_RESULT__;// A Javascript implementation of the "xorwow" prng algorithm by
+// George Marsaglia. See http://www.jstatsoft.org/v08/i14/paper
+
+(function(global, module, define) {
+
+function XorGen(seed) {
+ var me = this, strseed = '';
+
+ // Set up generator function.
+ me.next = function() {
+ var t = (me.x ^ (me.x >>> 2));
+ me.x = me.y; me.y = me.z; me.z = me.w; me.w = me.v;
+ return (me.d = (me.d + 362437 | 0)) +
+ (me.v = (me.v ^ (me.v << 4)) ^ (t ^ (t << 1))) | 0;
+ };
+
+ me.x = 0;
+ me.y = 0;
+ me.z = 0;
+ me.w = 0;
+ me.v = 0;
+
+ if (seed === (seed | 0)) {
+ // Integer seed.
+ me.x = seed;
+ } else {
+ // String seed.
+ strseed += seed;
+ }
+
+ // Mix in string seed, then discard an initial batch of 64 values.
+ for (var k = 0; k < strseed.length + 64; k++) {
+ me.x ^= strseed.charCodeAt(k) | 0;
+ if (k == strseed.length) {
+ me.d = me.x << 10 ^ me.x >>> 4;
+ }
+ me.next();
+ }
+}
+
+function copy(f, t) {
+ t.x = f.x;
+ t.y = f.y;
+ t.z = f.z;
+ t.w = f.w;
+ t.v = f.v;
+ t.d = f.d;
+ return t;
+}
+
+function impl(seed, opts) {
+ var xg = new XorGen(seed),
+ state = opts && opts.state,
+ prng = function() { return (xg.next() >>> 0) / 0x100000000; };
+ prng.double = function() {
+ do {
+ var top = xg.next() >>> 11,
+ bot = (xg.next() >>> 0) / 0x100000000,
+ result = (top + bot) / (1 << 21);
+ } while (result === 0);
+ return result;
+ };
+ prng.int32 = xg.next;
+ prng.quick = prng;
+ if (state) {
+ if (typeof(state) == 'object') copy(state, xg);
+ prng.state = function() { return copy(xg, {}); }
+ }
+ return prng;
+}
+
+if (module && module.exports) {
+ module.exports = impl;
+} else if (__webpack_require__(16) && __webpack_require__(29)) {
+ !(__WEBPACK_AMD_DEFINE_RESULT__ = (function() { return impl; }).call(exports, __webpack_require__, exports, module),
+ __WEBPACK_AMD_DEFINE_RESULT__ !== undefined && (module.exports = __WEBPACK_AMD_DEFINE_RESULT__));
+} else {
+ this.xorwow = impl;
+}
+
+})(
+ this,
+ true && module, // present in node.js
+ __webpack_require__(16) // present with an AMD loader
+);
+
+
+
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(28)(module)))
+
+/***/ }),
+/* 71 */
+/***/ (function(module, exports, __webpack_require__) {
+
+/* WEBPACK VAR INJECTION */(function(module) {var __WEBPACK_AMD_DEFINE_RESULT__;// A Javascript implementation of the "xorshift7" algorithm by
+// François Panneton and Pierre L'Ecuyer:
+// "On the Xorshift Random Number Generators"
+// http://saluc.engr.uconn.edu/refs/crypto/rng/panneton05onthexorshift.pdf
+
+(function(global, module, define) {
+
+function XorGen(seed) {
+ var me = this;
+
+ // Set up generator function.
+ me.next = function() {
+ // Update xor generator.
+ var X = me.x, i = me.i, t, v, w;
+ t = X[i]; t ^= (t >>> 7); v = t ^ (t << 24);
+ t = X[(i + 1) & 7]; v ^= t ^ (t >>> 10);
+ t = X[(i + 3) & 7]; v ^= t ^ (t >>> 3);
+ t = X[(i + 4) & 7]; v ^= t ^ (t << 7);
+ t = X[(i + 7) & 7]; t = t ^ (t << 13); v ^= t ^ (t << 9);
+ X[i] = v;
+ me.i = (i + 1) & 7;
+ return v;
+ };
+
+ function init(me, seed) {
+ var j, w, X = [];
+
+ if (seed === (seed | 0)) {
+ // Seed state array using a 32-bit integer.
+ w = X[0] = seed;
+ } else {
+ // Seed state using a string.
+ seed = '' + seed;
+ for (j = 0; j < seed.length; ++j) {
+ X[j & 7] = (X[j & 7] << 15) ^
+ (seed.charCodeAt(j) + X[(j + 1) & 7] << 13);
+ }
+ }
+ // Enforce an array length of 8, not all zeroes.
+ while (X.length < 8) X.push(0);
+ for (j = 0; j < 8 && X[j] === 0; ++j);
+ if (j == 8) w = X[7] = -1; else w = X[j];
+
+ me.x = X;
+ me.i = 0;
+
+ // Discard an initial 256 values.
+ for (j = 256; j > 0; --j) {
+ me.next();
+ }
+ }
+
+ init(me, seed);
+}
+
+function copy(f, t) {
+ t.x = f.x.slice();
+ t.i = f.i;
+ return t;
+}
+
+function impl(seed, opts) {
+ if (seed == null) seed = +(new Date);
+ var xg = new XorGen(seed),
+ state = opts && opts.state,
+ prng = function() { return (xg.next() >>> 0) / 0x100000000; };
+ prng.double = function() {
+ do {
+ var top = xg.next() >>> 11,
+ bot = (xg.next() >>> 0) / 0x100000000,
+ result = (top + bot) / (1 << 21);
+ } while (result === 0);
+ return result;
+ };
+ prng.int32 = xg.next;
+ prng.quick = prng;
+ if (state) {
+ if (state.x) copy(state, xg);
+ prng.state = function() { return copy(xg, {}); }
+ }
+ return prng;
+}
+
+if (module && module.exports) {
+ module.exports = impl;
+} else if (__webpack_require__(16) && __webpack_require__(29)) {
+ !(__WEBPACK_AMD_DEFINE_RESULT__ = (function() { return impl; }).call(exports, __webpack_require__, exports, module),
+ __WEBPACK_AMD_DEFINE_RESULT__ !== undefined && (module.exports = __WEBPACK_AMD_DEFINE_RESULT__));
+} else {
+ this.xorshift7 = impl;
+}
+
+})(
+ this,
+ true && module, // present in node.js
+ __webpack_require__(16) // present with an AMD loader
+);
+
+
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(28)(module)))
+
+/***/ }),
+/* 72 */
+/***/ (function(module, exports, __webpack_require__) {
+
+/* WEBPACK VAR INJECTION */(function(module) {var __WEBPACK_AMD_DEFINE_RESULT__;// A Javascript implementation of Richard Brent's Xorgens xor4096 algorithm.
+//
+// This fast non-cryptographic random number generator is designed for
+// use in Monte-Carlo algorithms. It combines a long-period xorshift
+// generator with a Weyl generator, and it passes all common batteries
+// of statistical tests for randomness while consuming only a few nanoseconds
+// for each prng generated. For background on the generator, see Brent's
+// paper: "Some long-period random number generators using shifts and xors."
+// http://arxiv.org/pdf/1004.3115v1.pdf
+//
+// Usage:
+//
+// var xor4096 = require('xor4096');
+// random = xor4096(1); // Seed with int32 or string.
+// assert.equal(random(), 0.1520436450538547); // (0, 1) range, 53 bits.
+// assert.equal(random.int32(), 1806534897); // signed int32, 32 bits.
+//
+// For nonzero numeric keys, this implementation provides a sequence
+// identical to that of Brent's xorgens 3 implementation in C. This
+// implementation also provides for initializing the generator with
+// string seeds, or for saving and restoring the state of the generator.
+//
+// On Chrome, this prng benchmarks about 2.1 times slower than
+// Javascript's built-in Math.random().
+
+(function(global, module, define) {
+
+function XorGen(seed) {
+ var me = this;
+
+ // Set up generator function.
+ me.next = function() {
+ var w = me.w,
+ X = me.X, i = me.i, t, v;
+ // Update Weyl generator.
+ me.w = w = (w + 0x61c88647) | 0;
+ // Update xor generator.
+ v = X[(i + 34) & 127];
+ t = X[i = ((i + 1) & 127)];
+ v ^= v << 13;
+ t ^= t << 17;
+ v ^= v >>> 15;
+ t ^= t >>> 12;
+ // Update Xor generator array state.
+ v = X[i] = v ^ t;
+ me.i = i;
+ // Result is the combination.
+ return (v + (w ^ (w >>> 16))) | 0;
+ };
+
+ function init(me, seed) {
+ var t, v, i, j, w, X = [], limit = 128;
+ if (seed === (seed | 0)) {
+ // Numeric seeds initialize v, which is used to generate X.
+ v = seed;
+ seed = null;
+ } else {
+ // String seeds are mixed into v and X one character at a time.
+ seed = seed + '\0';
+ v = 0;
+ limit = Math.max(limit, seed.length);
+ }
+ // Initialize circular array and weyl value.
+ for (i = 0, j = -32; j < limit; ++j) {
+ // Put the unicode characters into the array, and shuffle them.
+ if (seed) v ^= seed.charCodeAt((j + 32) % seed.length);
+ // After 32 shuffles, take v as the starting w value.
+ if (j === 0) w = v;
+ v ^= v << 10;
+ v ^= v >>> 15;
+ v ^= v << 4;
+ v ^= v >>> 13;
+ if (j >= 0) {
+ w = (w + 0x61c88647) | 0; // Weyl.
+ t = (X[j & 127] ^= (v + w)); // Combine xor and weyl to init array.
+ i = (0 == t) ? i + 1 : 0; // Count zeroes.
+ }
+ }
+ // We have detected all zeroes; make the key nonzero.
+ if (i >= 128) {
+ X[(seed && seed.length || 0) & 127] = -1;
+ }
+ // Run the generator 512 times to further mix the state before using it.
+ // Factoring this as a function slows the main generator, so it is just
+ // unrolled here. The weyl generator is not advanced while warming up.
+ i = 127;
+ for (j = 4 * 128; j > 0; --j) {
+ v = X[(i + 34) & 127];
+ t = X[i = ((i + 1) & 127)];
+ v ^= v << 13;
+ t ^= t << 17;
+ v ^= v >>> 15;
+ t ^= t >>> 12;
+ X[i] = v ^ t;
+ }
+ // Storing state as object members is faster than using closure variables.
+ me.w = w;
+ me.X = X;
+ me.i = i;
+ }
+
+ init(me, seed);
+}
+
+function copy(f, t) {
+ t.i = f.i;
+ t.w = f.w;
+ t.X = f.X.slice();
+ return t;
+};
+
+function impl(seed, opts) {
+ if (seed == null) seed = +(new Date);
+ var xg = new XorGen(seed),
+ state = opts && opts.state,
+ prng = function() { return (xg.next() >>> 0) / 0x100000000; };
+ prng.double = function() {
+ do {
+ var top = xg.next() >>> 11,
+ bot = (xg.next() >>> 0) / 0x100000000,
+ result = (top + bot) / (1 << 21);
+ } while (result === 0);
+ return result;
+ };
+ prng.int32 = xg.next;
+ prng.quick = prng;
+ if (state) {
+ if (state.X) copy(state, xg);
+ prng.state = function() { return copy(xg, {}); }
+ }
+ return prng;
+}
+
+if (module && module.exports) {
+ module.exports = impl;
+} else if (__webpack_require__(16) && __webpack_require__(29)) {
+ !(__WEBPACK_AMD_DEFINE_RESULT__ = (function() { return impl; }).call(exports, __webpack_require__, exports, module),
+ __WEBPACK_AMD_DEFINE_RESULT__ !== undefined && (module.exports = __WEBPACK_AMD_DEFINE_RESULT__));
+} else {
+ this.xor4096 = impl;
+}
+
+})(
+ this, // window object or global
+ true && module, // present in node.js
+ __webpack_require__(16) // present with an AMD loader
+);
+
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(28)(module)))
+
+/***/ }),
+/* 73 */
+/***/ (function(module, exports, __webpack_require__) {
+
+/* WEBPACK VAR INJECTION */(function(module) {var __WEBPACK_AMD_DEFINE_RESULT__;// A Javascript implementation of the "Tyche-i" prng algorithm by
+// Samuel Neves and Filipe Araujo.
+// See https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf
+
+(function(global, module, define) {
+
+function XorGen(seed) {
+ var me = this, strseed = '';
+
+ // Set up generator function.
+ me.next = function() {
+ var b = me.b, c = me.c, d = me.d, a = me.a;
+ b = (b << 25) ^ (b >>> 7) ^ c;
+ c = (c - d) | 0;
+ d = (d << 24) ^ (d >>> 8) ^ a;
+ a = (a - b) | 0;
+ me.b = b = (b << 20) ^ (b >>> 12) ^ c;
+ me.c = c = (c - d) | 0;
+ me.d = (d << 16) ^ (c >>> 16) ^ a;
+ return me.a = (a - b) | 0;
+ };
+
+ /* The following is non-inverted tyche, which has better internal
+ * bit diffusion, but which is about 25% slower than tyche-i in JS.
+ me.next = function() {
+ var a = me.a, b = me.b, c = me.c, d = me.d;
+ a = (me.a + me.b | 0) >>> 0;
+ d = me.d ^ a; d = d << 16 ^ d >>> 16;
+ c = me.c + d | 0;
+ b = me.b ^ c; b = b << 12 ^ d >>> 20;
+ me.a = a = a + b | 0;
+ d = d ^ a; me.d = d = d << 8 ^ d >>> 24;
+ me.c = c = c + d | 0;
+ b = b ^ c;
+ return me.b = (b << 7 ^ b >>> 25);
+ }
+ */
+
+ me.a = 0;
+ me.b = 0;
+ me.c = 2654435769 | 0;
+ me.d = 1367130551;
+
+ if (seed === Math.floor(seed)) {
+ // Integer seed.
+ me.a = (seed / 0x100000000) | 0;
+ me.b = seed | 0;
+ } else {
+ // String seed.
+ strseed += seed;
+ }
+
+ // Mix in string seed, then discard an initial batch of 64 values.
+ for (var k = 0; k < strseed.length + 20; k++) {
+ me.b ^= strseed.charCodeAt(k) | 0;
+ me.next();
+ }
+}
+
+function copy(f, t) {
+ t.a = f.a;
+ t.b = f.b;
+ t.c = f.c;
+ t.d = f.d;
+ return t;
+};
+
+function impl(seed, opts) {
+ var xg = new XorGen(seed),
+ state = opts && opts.state,
+ prng = function() { return (xg.next() >>> 0) / 0x100000000; };
+ prng.double = function() {
+ do {
+ var top = xg.next() >>> 11,
+ bot = (xg.next() >>> 0) / 0x100000000,
+ result = (top + bot) / (1 << 21);
+ } while (result === 0);
+ return result;
+ };
+ prng.int32 = xg.next;
+ prng.quick = prng;
+ if (state) {
+ if (typeof(state) == 'object') copy(state, xg);
+ prng.state = function() { return copy(xg, {}); }
+ }
+ return prng;
+}
+
+if (module && module.exports) {
+ module.exports = impl;
+} else if (__webpack_require__(16) && __webpack_require__(29)) {
+ !(__WEBPACK_AMD_DEFINE_RESULT__ = (function() { return impl; }).call(exports, __webpack_require__, exports, module),
+ __WEBPACK_AMD_DEFINE_RESULT__ !== undefined && (module.exports = __WEBPACK_AMD_DEFINE_RESULT__));
+} else {
+ this.tychei = impl;
+}
+
+})(
+ this,
+ true && module, // present in node.js
+ __webpack_require__(16) // present with an AMD loader
+);
+
+
+
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(28)(module)))
+
+/***/ }),
+/* 74 */
+/***/ (function(module, exports, __webpack_require__) {
+
+var __WEBPACK_AMD_DEFINE_RESULT__;/*
+Copyright 2014 David Bau.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+*/
+
+(function (pool, math) {
+//
+// The following constants are related to IEEE 754 limits.
+//
+var global = this,
+ width = 256, // each RC4 output is 0 <= x < 256
+ chunks = 6, // at least six RC4 outputs for each double
+ digits = 52, // there are 52 significant digits in a double
+ rngname = 'random', // rngname: name for Math.random and Math.seedrandom
+ startdenom = math.pow(width, chunks),
+ significance = math.pow(2, digits),
+ overflow = significance * 2,
+ mask = width - 1,
+ nodecrypto; // node.js crypto module, initialized at the bottom.
+
+//
+// seedrandom()
+// The main entry point, installed below as math['seed' + rngname].
+//
+function seedrandom(seed, options, callback) {
+ var key = [];
+ options = (options == true) ? { entropy: true } : (options || {});
+
+ // Flatten the seed string or build one from local entropy if needed.
+ var shortseed = mixkey(flatten(
+ options.entropy ? [seed, tostring(pool)] :
+ (seed == null) ? autoseed() : seed, 3), key);
+
+ // Use the seed to initialize an ARC4 generator.
+ var arc4 = new ARC4(key);
+
+ // This function returns a random double in [0, 1) that contains
+ // randomness in every bit of the mantissa of the IEEE 754 value.
+ var prng = function() {
+ var n = arc4.g(chunks), // Start with a numerator n < 2 ^ 48
+ d = startdenom, // and denominator d = 2 ^ 48.
+ x = 0; // and no 'extra last byte'.
+ while (n < significance) { // Fill up all significant digits by
+ n = (n + x) * width; // shifting numerator and
+ d *= width; // denominator and generating a
+ x = arc4.g(1); // new least-significant-byte.
+ }
+ while (n >= overflow) { // To avoid rounding up, before adding
+ n /= 2; // last byte, shift everything
+ d /= 2; // right using integer math until
+ x >>>= 1; // we have exactly the desired bits.
+ }
+ return (n + x) / d; // Form the number within [0, 1).
+ };
+
+ prng.int32 = function() { return arc4.g(4) | 0; }
+ prng.quick = function() { return arc4.g(4) / 0x100000000; }
+ prng.double = prng;
+
+ // Mix the randomness into accumulated entropy.
+ mixkey(tostring(arc4.S), pool);
+
+ // Calling convention: what to return as a function of prng, seed, is_math.
+ return (options.pass || callback ||
+ function(prng, seed, is_math_call, state) {
+ if (state) {
+ // Load the arc4 state from the given state if it has an S array.
+ if (state.S) { copy(state, arc4); }
+ // Only provide the .state method if requested via options.state.
+ prng.state = function() { return copy(arc4, {}); }
+ }
+
+ // If called as a method of Math (Math.seedrandom()), mutate
+ // Math.random because that is how seedrandom.js has worked since v1.0.
+ if (is_math_call) { math[rngname] = prng; return seed; }
+
+ // Otherwise, it is a newer calling convention, so return the
+ // prng directly.
+ else return prng;
+ })(
+ prng,
+ shortseed,
+ 'global' in options ? options.global : (this == math),
+ options.state);
+}
+math['seed' + rngname] = seedrandom;
+
+//
+// ARC4
+//
+// An ARC4 implementation. The constructor takes a key in the form of
+// an array of at most (width) integers that should be 0 <= x < (width).
+//
+// The g(count) method returns a pseudorandom integer that concatenates
+// the next (count) outputs from ARC4. Its return value is a number x
+// that is in the range 0 <= x < (width ^ count).
+//
+function ARC4(key) {
+ var t, keylen = key.length,
+ me = this, i = 0, j = me.i = me.j = 0, s = me.S = [];
+
+ // The empty key [] is treated as [0].
+ if (!keylen) { key = [keylen++]; }
+
+ // Set up S using the standard key scheduling algorithm.
+ while (i < width) {
+ s[i] = i++;
+ }
+ for (i = 0; i < width; i++) {
+ s[i] = s[j = mask & (j + key[i % keylen] + (t = s[i]))];
+ s[j] = t;
+ }
+
+ // The "g" method returns the next (count) outputs as one number.
+ (me.g = function(count) {
+ // Using instance members instead of closure state nearly doubles speed.
+ var t, r = 0,
+ i = me.i, j = me.j, s = me.S;
+ while (count--) {
+ t = s[i = mask & (i + 1)];
+ r = r * width + s[mask & ((s[i] = s[j = mask & (j + t)]) + (s[j] = t))];
+ }
+ me.i = i; me.j = j;
+ return r;
+ // For robust unpredictability, the function call below automatically
+ // discards an initial batch of values. This is called RC4-drop[256].
+ // See http://google.com/search?q=rsa+fluhrer+response&btnI
+ })(width);
+}
+
+//
+// copy()
+// Copies internal state of ARC4 to or from a plain object.
+//
+function copy(f, t) {
+ t.i = f.i;
+ t.j = f.j;
+ t.S = f.S.slice();
+ return t;
+};
+
+//
+// flatten()
+// Converts an object tree to nested arrays of strings.
+//
+function flatten(obj, depth) {
+ var result = [], typ = (typeof obj), prop;
+ if (depth && typ == 'object') {
+ for (prop in obj) {
+ try { result.push(flatten(obj[prop], depth - 1)); } catch (e) {}
+ }
+ }
+ return (result.length ? result : typ == 'string' ? obj : obj + '\0');
+}
+
+//
+// mixkey()
+// Mixes a string seed into a key that is an array of integers, and
+// returns a shortened string seed that is equivalent to the result key.
+//
+function mixkey(seed, key) {
+ var stringseed = seed + '', smear, j = 0;
+ while (j < stringseed.length) {
+ key[mask & j] =
+ mask & ((smear ^= key[mask & j] * 19) + stringseed.charCodeAt(j++));
+ }
+ return tostring(key);
+}
+
+//
+// autoseed()
+// Returns an object for autoseeding, using window.crypto and Node crypto
+// module if available.
+//
+function autoseed() {
+ try {
+ var out;
+ if (nodecrypto && (out = nodecrypto.randomBytes)) {
+ // The use of 'out' to remember randomBytes makes tight minified code.
+ out = out(width);
+ } else {
+ out = new Uint8Array(width);
+ (global.crypto || global.msCrypto).getRandomValues(out);
+ }
+ return tostring(out);
+ } catch (e) {
+ var browser = global.navigator,
+ plugins = browser && browser.plugins;
+ return [+new Date, global, plugins, global.screen, tostring(pool)];
+ }
+}
+
+//
+// tostring()
+// Converts an array of charcodes to a string
+//
+function tostring(a) {
+ return String.fromCharCode.apply(0, a);
+}
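+//
+// Usage sketch (not invoked): seedrandom above returns a deterministic prng;
+// passing { state: true } exposes .state(), and passing a saved state object
+// back in resumes the ARC4 stream at the same point.
+//
+function seedrandomStateSketch() {
+ var rng = seedrandom('hello.', { state: true });
+ rng(); // advance once
+ var resumed = seedrandom('', { state: rng.state() });
+ return rng() === resumed(); // deterministic continuation
+}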
+
+//
+// When seedrandom.js is loaded, we immediately mix a few bits
+// from the built-in RNG into the entropy pool. Because we do
+// not want to interfere with deterministic PRNG state later,
+// seedrandom will not call math.random on its own again after
+// initialization.
+//
+mixkey(math.random(), pool);
+
+//
+// Nodejs and AMD support: export the implementation as a module using
+// either convention.
+//
+if ( true && module.exports) {
+ module.exports = seedrandom;
+ // When in node.js, try using crypto package for autoseeding.
+ try {
+ nodecrypto = __webpack_require__(75);
+ } catch (ex) {}
+} else if (true) {
+ !(__WEBPACK_AMD_DEFINE_RESULT__ = (function() { return seedrandom; }).call(exports, __webpack_require__, exports, module),
+ __WEBPACK_AMD_DEFINE_RESULT__ !== undefined && (module.exports = __WEBPACK_AMD_DEFINE_RESULT__));
+}
+
+// End anonymous scope, and pass initial values.
+})(
+ [], // pool: entropy pool starts empty
+ Math // math: package containing random, pow, and seedrandom
+);
+
+
+/***/ }),
+/* 75 */
+/***/ (function(module, exports) {
+
+/* (ignored) */
+
+/***/ }),
+/* 76 */
+/***/ (function(module, exports, __webpack_require__) {
+
+/* WEBPACK VAR INJECTION */(function(global) {var scope = (typeof global !== "undefined" && global) ||
+ (typeof self !== "undefined" && self) ||
+ window;
+var apply = Function.prototype.apply;
+
+// DOM APIs, for completeness
+
+exports.setTimeout = function() {
+ return new Timeout(apply.call(setTimeout, scope, arguments), clearTimeout);
+};
+exports.setInterval = function() {
+ return new Timeout(apply.call(setInterval, scope, arguments), clearInterval);
+};
+exports.clearTimeout =
+exports.clearInterval = function(timeout) {
+ if (timeout) {
+ timeout.close();
+ }
+};
+
+function Timeout(id, clearFn) {
+ this._id = id;
+ this._clearFn = clearFn;
+}
+Timeout.prototype.unref = Timeout.prototype.ref = function() {};
+Timeout.prototype.close = function() {
+ this._clearFn.call(scope, this._id);
+};
+
+// Does not start the timer, just sets up the members needed.
+exports.enroll = function(item, msecs) {
+ clearTimeout(item._idleTimeoutId);
+ item._idleTimeout = msecs;
+};
+
+exports.unenroll = function(item) {
+ clearTimeout(item._idleTimeoutId);
+ item._idleTimeout = -1;
+};
+
+exports._unrefActive = exports.active = function(item) {
+ clearTimeout(item._idleTimeoutId);
+
+ var msecs = item._idleTimeout;
+ if (msecs >= 0) {
+ item._idleTimeoutId = setTimeout(function onTimeout() {
+ if (item._onTimeout)
+ item._onTimeout();
+ }, msecs);
+ }
+};
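+// Sketch of the enroll/active/unenroll trio above, which mirrors Node's
+// internal idle-timer API: enroll() records the delay, active() (re)arms it,
+// and firing invokes the item's _onTimeout. Not invoked here:
+function idleTimerSketch() {
+ var item = { _onTimeout: function() { console.log('idle'); } };
+ exports.enroll(item, 100); // remember a 100 ms idle timeout
+ exports.active(item); // arm (or re-arm) the timer
+ exports.unenroll(item); // cancel before it fires
+}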
+
+// setimmediate attaches itself to the global object
+__webpack_require__(77);
+// On some exotic environments, it's not clear which object `setimmediate` was
+// able to install onto. Search each possibility in the same order as the
+// `setimmediate` library.
+exports.setImmediate = (typeof self !== "undefined" && self.setImmediate) ||
+ (typeof global !== "undefined" && global.setImmediate) ||
+ (this && this.setImmediate);
+exports.clearImmediate = (typeof self !== "undefined" && self.clearImmediate) ||
+ (typeof global !== "undefined" && global.clearImmediate) ||
+ (this && this.clearImmediate);
+
+/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(27)))
+
+/***/ }),
+/* 77 */
+/***/ (function(module, exports, __webpack_require__) {
+
+/* WEBPACK VAR INJECTION */(function(global, process) {(function (global, undefined) {
+ "use strict";
+
+ if (global.setImmediate) {
+ return;
+ }
+
+ var nextHandle = 1; // Spec says greater than zero
+ var tasksByHandle = {};
+ var currentlyRunningATask = false;
+ var doc = global.document;
+ var registerImmediate;
+
+ function setImmediate(callback) {
+ // Callback can either be a function or a string
+ if (typeof callback !== "function") {
+ callback = new Function("" + callback);
+ }
+ // Copy function arguments
+ var args = new Array(arguments.length - 1);
+ for (var i = 0; i < args.length; i++) {
+ args[i] = arguments[i + 1];
+ }
+ // Store and register the task
+ var task = { callback: callback, args: args };
+ tasksByHandle[nextHandle] = task;
+ registerImmediate(nextHandle);
+ return nextHandle++;
+ }
+
+ function clearImmediate(handle) {
+ delete tasksByHandle[handle];
+ }
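+ // Sketch of the public pair above (not invoked here): setImmediate queues a
+ // task and returns a numeric handle; clearImmediate cancels it by handle.
+ function setImmediateSketch() {
+ var handle = setImmediate(function (msg) { console.log(msg); }, 'tick');
+ clearImmediate(handle); // dequeue before the registered channel fires
+ }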
+
+ function run(task) {
+ var callback = task.callback;
+ var args = task.args;
+ switch (args.length) {
+ case 0:
+ callback();
+ break;
+ case 1:
+ callback(args[0]);
+ break;
+ case 2:
+ callback(args[0], args[1]);
+ break;
+ case 3:
+ callback(args[0], args[1], args[2]);
+ break;
+ default:
+ callback.apply(undefined, args);
+ break;
+ }
+ }
+
+ function runIfPresent(handle) {
+ // From the spec: "Wait until any invocations of this algorithm started before this one have completed."
+ // So if we're currently running a task, we'll need to delay this invocation.
+ if (currentlyRunningATask) {
+ // Delay by doing a setTimeout. setImmediate was tried instead, but in Firefox 7 it generated a
+ // "too much recursion" error.
+ setTimeout(runIfPresent, 0, handle);
+ } else {
+ var task = tasksByHandle[handle];
+ if (task) {
+ currentlyRunningATask = true;
+ try {
+ run(task);
+ } finally {
+ clearImmediate(handle);
+ currentlyRunningATask = false;
+ }
+ }
+ }
+ }
+
+ function installNextTickImplementation() {
+ registerImmediate = function(handle) {
+ process.nextTick(function () { runIfPresent(handle); });
+ };
+ }
+
+ function canUsePostMessage() {
+ // The test against `importScripts` prevents this implementation from being installed inside a web worker,
+ // where `global.postMessage` means something completely different and can't be used for this purpose.
+ if (global.postMessage && !global.importScripts) {
+ var postMessageIsAsynchronous = true;
+ var oldOnMessage = global.onmessage;
+ global.onmessage = function() {
+ postMessageIsAsynchronous = false;
+ };
+ global.postMessage("", "*");
+ global.onmessage = oldOnMessage;
+ return postMessageIsAsynchronous;
+ }
+ }
+
+ function installPostMessageImplementation() {
+ // Installs an event handler on `global` for the `message` event: see
+ // * https://developer.mozilla.org/en/DOM/window.postMessage
+ // * http://www.whatwg.org/specs/web-apps/current-work/multipage/comms.html#crossDocumentMessages
+
+ var messagePrefix = "setImmediate$" + Math.random() + "$";
+ var onGlobalMessage = function(event) {
+ if (event.source === global &&
+ typeof event.data === "string" &&
+ event.data.indexOf(messagePrefix) === 0) {
+ runIfPresent(+event.data.slice(messagePrefix.length));
+ }
+ };
+
+ if (global.addEventListener) {
+ global.addEventListener("message", onGlobalMessage, false);
+ } else {
+ global.attachEvent("onmessage", onGlobalMessage);
+ }
+
+ registerImmediate = function(handle) {
+ global.postMessage(messagePrefix + handle, "*");
+ };
+ }
+
+ function installMessageChannelImplementation() {
+ var channel = new MessageChannel();
+ channel.port1.onmessage = function(event) {
+ var handle = event.data;
+ runIfPresent(handle);
+ };
+
+ registerImmediate = function(handle) {
+ channel.port2.postMessage(handle);
+ };
+ }
+
+ function installReadyStateChangeImplementation() {
+ var html = doc.documentElement;
+ registerImmediate = function(handle) {
+ // Create a <script> element; its readystatechange event will be fired
+ // asynchronously once it is inserted into the document. Do so, thus queuing
+ // the task. Remember to clean up once it's been called.
+ var script = doc.createElement("script");
+ script.onreadystatechange = function () {
+ runIfPresent(handle);
+ script.onreadystatechange = null;
+ html.removeChild(script);
+ script = null;
+ };
+ html.appendChild(script);
+ };
+ }