From d4830f09cb9836d7bca0e4a6da072c2110b02209 Mon Sep 17 00:00:00 2001 From: Ron Date: Tue, 8 Feb 2022 08:59:49 -0700 Subject: [PATCH 01/69] Update TaskParameters.ts westus3 is supported as well https://docs.microsoft.com/en-us/azure/virtual-machines/image-builder-overview#regions I did not see an "australia" location supported but an australiaeast --- src/TaskParameters.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/TaskParameters.ts b/src/TaskParameters.ts index 17442c7a..92b698d8 100644 --- a/src/TaskParameters.ts +++ b/src/TaskParameters.ts @@ -47,7 +47,7 @@ export default class TaskParameters { public distImageTags: string = ""; constructor() { - var locations = ["eastus", "eastus2", "westcentralus", "westus", "westus2", "southcentralus", "northeurope", "westeurope", "southeastasia", "australiasoutheast", "australia", "uksouth", "ukwest" ]; + var locations = ["eastus", "eastus2", "westcentralus", "westus", "westus2", "westus3", "southcentralus", "northeurope", "westeurope", "southeastasia", "australiasoutheast", "australiaeast", "uksouth", "ukwest" ]; console.log("start reading task parameters..."); From 9727129c6fa086a8c8d099c0ffbadfb51b75cb9c Mon Sep 17 00:00:00 2001 From: Ron Truex Date: Thu, 10 Feb 2022 13:22:58 -0700 Subject: [PATCH 02/69] updates for run action --- README.md | 10 + action.yml | 6 + lib/AzureImageBuilderClient.js | 62 +- lib/ImageBuilder.js | 43 +- lib/TaskParameters.js | 10 + lib/constants.js | 1 + package-lock.json | 2504 +++++++++++++++++++++++++++++++- src/AzureImageBuilderClient.ts | 54 +- src/ImageBuilder.ts | 37 +- src/constants.ts | 1 + 10 files changed, 2688 insertions(+), 40 deletions(-) diff --git a/README.md b/README.md index 0f819edd..9db4f62d 100644 --- a/README.md +++ b/README.md @@ -41,6 +41,16 @@ Learn more about configuring permissions for Azure Image builder Service using [ # Inputs for the Action +* `action-run-mode`: Required. This is the run mode for the action. 
The options are + * Full + * default + * Wait until everything completes + * Distro + * Wait until the template starts the distro process + * NoWait + * This will not wait for actions to complete, like run template + * It will try to get the action to complete the quicket but can require the most manual cleanup + * `resource-group-name`: Required. This is the resource group where the action creates a storage for saving artifacts needed for customized image. Azure image builder also uses the same resource group for Image Template creation. * `image-builder-template-name`: The name of the image builder template resource to be used for creating and running the Image builder service. If you already have an [AIB Template file](https://github.com/danielsollondon/azvmimagebuilder/tree/master/quickquickstarts) downloaded in the runner, then you can give the full filepath to that as well. E.g. _${{ GITHUB.WORKSPACE }}/vmImageTemplate/ubuntuCustomVM.json_. Note that incase a filepath is provided in this action input, then parameters in the file will take precedence over action inputs. Irrespective, customizer section of action is always executed. diff --git a/action.yml b/action.yml index ff4faf71..100e2497 100644 --- a/action.yml +++ b/action.yml @@ -1,6 +1,12 @@ name: "Build Azure Virtual Machine Image" description: "Create custom virtual machine images that contain artifacts built in CI workflows" inputs: + #action inputs + action-run-mode: + description: 'The run mode for the action. Full (default) wait until everything completes, Distro wait until the template starts the distro process, NoWait as soon as the action sets up and triggers the action it will quit' + required: true + default: full + #general inputs location: description: 'This is the Azure region in which the Image Builder will run.' 
diff --git a/lib/AzureImageBuilderClient.js b/lib/AzureImageBuilderClient.js index fdc3356e..fc8297f0 100644 --- a/lib/AzureImageBuilderClient.js +++ b/lib/AzureImageBuilderClient.js @@ -81,6 +81,32 @@ class ImageBuilderClient { } }); } + getRunTemplate(templateName, subscriptionId){ + var response; + return __awaiter(this, void 0, void 0, function* () { + try { + let httpRequest = { + method: 'GET', + uri: this._client.getRequestUri(`/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imagetemplates/{imageTemplateName}`, { '{subscriptionId}': subscriptionId, '{resourceGroupName}': this._taskParameters.resourceGroupName, '{imageTemplateName}': templateName }, [], apiVersion) + }; + response = yield this._client.beginRequest(httpRequest); + + if (response.statusCode == 202) { + response = yield this.getLongRunningOperationResult(response); + } + if (response.statusCode != 200 || response.body.status == "Failed") { + throw AzureRestClient_1.ToError(response); + } + if (response.statusCode == 200 && response.body && response.body.status == "Succeeded") { + console.log("Run template: \n", response.body.status); + } + return response + } + catch (error) { + throw Error(`Post template call failed for template ${templateName} with error: ${JSON.stringify(error)}`); + } + }); + } runTemplate(templateName, subscriptionId, timeOutInMinutes) { return __awaiter(this, void 0, void 0, function* () { try { @@ -89,9 +115,14 @@ class ImageBuilderClient { method: 'POST', uri: this._client.getRequestUri(`/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imagetemplates/{imageTemplateName}/run`, { '{subscriptionId}': subscriptionId, '{resourceGroupName}': this._taskParameters.resourceGroupName, '{imageTemplateName}': templateName }, [], apiVersion) }; - var response = yield this._client.beginRequest(httpRequest); + var response = yield 
this._client.beginRequest(httpRequest); + if (response.statusCode == 202) { - response = yield this.getLongRunningOperationResult(response, timeOutInMinutes); + if (this._taskParameters.actionRunMode == "nowait"){ + console.log("Action Run Mode set to NoWait. Skipping wait\n"); + return + } + response = yield this.getLongRunningOperationResult(response, timeOutInMinutes, templateName, subscriptionId); } if (response.statusCode != 200 || response.body.status == "Failed") { throw AzureRestClient_1.ToError(response); @@ -155,7 +186,7 @@ class ImageBuilderClient { return output; }); } - getLongRunningOperationResult(response, timeoutInMinutes) { + getLongRunningOperationResult(response, timeoutInMinutes, templateName, subscriptionId) { var response; return __awaiter(this, void 0, void 0, function* () { var longRunningOperationRetryTimeout = !!timeoutInMinutes ? timeoutInMinutes : 0; @@ -173,6 +204,7 @@ class ImageBuilderClient { if (!httpRequest.uri) { console.log("error in uri " + httpRequest.uri); } + var sleepDuration = 15; while (true) { response = yield this._client.beginRequest(httpRequest); if (response.statusCode === 202 || (response.body && (response.body.status == "Accepted" || response.body.status == "Running" || response.body.status == "InProgress"))) { @@ -182,7 +214,29 @@ class ImageBuilderClient { if (!waitIndefinitely && timeout < new Date().getTime()) { throw Error(`error in url`); } - var sleepDuration = 15; + if (this._taskParameters.actionRunMode == "distro" && templateName && subscriptionId){ + try{ + let runTemplate_result = null + try{ + runTemplate_result = yield this.getRunTemplate(templateName, subscriptionId).then(result=> (runTemplate_result = result)) + + if (!runTemplate_result.body.properties && !runTemplate_result.body.properties.lastRunStatus){ + if (runTemplate_result.properties.lastRunStatus.runSubState.toLowerCase() == "distributing"){ + console.log("Template is distributing set to break") + response = runTemplate_result + return 
response + } + } + } + catch(err){ + console.log(err) + } + } + catch(err){ + console.log(err) + } + } + yield this.sleepFor(sleepDuration); } else { diff --git a/lib/ImageBuilder.js b/lib/ImageBuilder.js index da2e1da6..13267304 100644 --- a/lib/ImageBuilder.js +++ b/lib/ImageBuilder.js @@ -97,11 +97,15 @@ class ImageBuilder { var template = JSON.parse(this._taskParameters.templateJsonFromUser); this._taskParameters.location = template.location; } + + this.templateName = this.getTemplateName(); + var runOutputName = this.getRunoutputName(); + console.log("Using Managed Identity " + this.idenityName); var blobUrl = ""; if (isCreateBlob) { //create a blob service - yield this.createStorageAccount(); + yield this.createStorageAccount(this.templateName); this._blobService = azure.createBlobService(this.storageAccount, this.accountkeys); this.containerName = constants.containerName; var blobName = this._taskParameters.buildFolder + "/" + process.env.GITHUB_RUN_ID + "/" + this._taskParameters.buildFolder + `_${Utils_1.getCurrentTime()}`; @@ -119,8 +123,6 @@ class ImageBuilder { else { templateJson = this._buildTemplate.addUserCustomisationIfNeeded(blobUrl); } - this.templateName = this.getTemplateName(); - var runOutputName = this.getRunoutputName(); templateJson.properties.distribute[0].runOutputName = runOutputName; this.isVhdDistribute = templateJson.properties.distribute[0].type == "VHD"; var templateStr = JSON.stringify(templateJson, null, 2); @@ -129,23 +131,34 @@ class ImageBuilder { yield this._aibClient.putImageTemplate(templateStr, this.templateName, subscriptionId); this.imgBuilderTemplateExists = true; yield this._aibClient.runTemplate(this.templateName, subscriptionId, this._taskParameters.buildTimeoutInMinutes); - var out = yield this._aibClient.getRunOutput(this.templateName, runOutputName, subscriptionId); var templateID = yield this._aibClient.getTemplateId(this.templateName, subscriptionId); - var imagebuilderRunStatus = "failed"; - 
core.setOutput('templateName', this.templateName); - core.setOutput('templateId', templateID); - core.setOutput('run-output-name', runOutputName); - if (out) { + + if (this._taskParameters.actionRunMode !== "nowait"){ + var out = yield this._aibClient.getRunOutput(this.templateName, runOutputName, subscriptionId); + var imagebuilderRunStatus = "failed"; + core.setOutput('templateName', this.templateName); + core.setOutput('templateId', templateID); + core.setOutput('run-output-name', runOutputName); + if (out) { + core.setOutput('custom-image-uri', out); + core.setOutput('imagebuilder-run-status', "succeeded"); + imagebuilderRunStatus = "succeeded"; + } + } + else{ + out = "" core.setOutput('custom-image-uri', out); - core.setOutput('imagebuilder-run-status', "succeeded"); - imagebuilderRunStatus = "succeeded"; + core.setOutput('imagebuilder-run-status', "skipped"); + imagebuilderRunStatus = "skipped"; } + if (Utils_2.default.IsEqual(templateJson.properties.source.type, "PlatformImage")) { core.setOutput('pirPublisher', templateJson.properties.source.publisher); core.setOutput('pirOffer', templateJson.properties.source.offer); core.setOutput('pirSku', templateJson.properties.source.sku); core.setOutput('pirVersion', templateJson.properties.source.version); } + console.log("=============================================================================="); console.log("## task output variables ##"); console.log("$(imagebuilder-run-status) = ", imagebuilderRunStatus); @@ -162,15 +175,19 @@ class ImageBuilder { finally { var outStream = yield this.executeAzCliCommand(`group exists -n ${this._taskParameters.resourceGroupName}`); if (outStream) { + if (imagebuilderRunStatus != "failed" && (this._taskParameters.actionRunMode == "nowait" || this._taskParameters.actionRunMode == "distro")){ + console.log("skipping cleanup action run mode set to nowait or distro") + return + } this.cleanup(subscriptionId); } } }); } - createStorageAccount() { + 
createStorageAccount(templateName) { return __awaiter(this, void 0, void 0, function* () { this.storageAccount = Util.format('%s%s', constants.storageAccountName, Utils_1.getCurrentTime()); - yield this.executeAzCliCommand(`storage account create --name "${this.storageAccount}" --resource-group "${this._taskParameters.resourceGroupName}" --location "${this._taskParameters.location}" --sku Standard_RAGRS`); + yield this.executeAzCliCommand(`storage account create --name "${this.storageAccount}" --resource-group "${this._taskParameters.resourceGroupName}" --location "${this._taskParameters.location}" --sku Standard_RAGRS --tags "imageTemplateName=${templateName}"`); core.debug("Created storage account " + this.storageAccount); var outStream = yield this.executeAzCliCommand(`storage account keys list -g "${this._taskParameters.resourceGroupName}" -n "${this.storageAccount}"`); this.accountkeys = JSON.parse(`${outStream}`)[0].value; diff --git a/lib/TaskParameters.js b/lib/TaskParameters.js index 7773f7a9..4e697803 100644 --- a/lib/TaskParameters.js +++ b/lib/TaskParameters.js @@ -29,6 +29,8 @@ const Utils_1 = __importDefault(require("./Utils")); var fs = require('fs'); class TaskParameters { constructor() { + // action inputs + this.actionRunMode = "" // image builder inputs this.resourceGroupName = ""; this.location = ""; @@ -63,6 +65,14 @@ class TaskParameters { this.distImageTags = ""; var locations = ["eastus", "eastus2", "westcentralus", "westus", "westus2", "southcentralus", "northeurope", "westeurope", "southeastasia", "australiasoutheast", "australia", "uksouth", "ukwest" ]; console.log("start reading task parameters..."); + + this.actionRunMode = tl.getInput(constants.ActionRunMode, { required: true }).toLowerCase(); + if (!this.actionRunMode){ + this.actionRunMode = "full" + } + + console.log(`Action run mode set: ${this.actionRunMode}`) + this.imagebuilderTemplateName = tl.getInput(constants.ImageBuilderTemplateName); if 
(this.imagebuilderTemplateName.indexOf(".json") > -1) { this.isTemplateJsonProvided = true; diff --git a/lib/constants.js b/lib/constants.js index b6ce48cf..87f543cc 100644 --- a/lib/constants.js +++ b/lib/constants.js @@ -1,5 +1,6 @@ "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); +exports.ActionRunMode = "action-run-mode"; exports.imageTemplateName = exports.containerName = exports.storageAccountName = exports.DistImageTags = exports.RunOutputName = exports.DistLocation = exports.DistResourceId = exports.DistributeType = exports.customizerDestination = exports.customizerScript = exports.customizerWindowsUpdate = exports.WindowsUpdateProvisioner = exports.InlineScript = exports.CustomizerSource = exports.sharedImageGallerySourceTypeImage = exports.managedImageSourceTypeImage = exports.marketPlaceSourceTypeImage = exports.platformImageSourceTypeImage = exports.SourceImage = exports.SourceOSType = exports.SourceImageType = exports.ManagedIdentity = exports.VMSize = exports.BuildTimeoutInMinutes = exports.ImageBuilderTemplateName = exports.ResourceGroupName = exports.Location = void 0; exports.Location = "location"; exports.ResourceGroupName = "resource-group-name"; diff --git a/package-lock.json b/package-lock.json index e0794868..32453cc0 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,8 +1,2491 @@ { "name": "build-vm-image", "version": "1.0.0", - "lockfileVersion": 1, + "lockfileVersion": 2, "requires": true, + "packages": { + "": { + "name": "build-vm-image", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "@actions/core": "^1.2.4", + "@actions/exec": "^1.0.4", + "@actions/io": "^1.0.2", + "@types/node": "^14.6.0", + "@types/q": "^1.5.4", + "archiver": "^5.0.0", + "azure-actions-webclient": "^1.0.11", + "azure-storage": "^2.10.3", + "eslint": "^5.16.0", + "jszip": "^3.5.0", + "tar.gz": "^1.0.7", + "typescript": "^3.9.7", + "zip-lib": "^0.7.1" + }, + "devDependencies": {} + }, + 
"node_modules/@actions/core": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.2.4.tgz", + "integrity": "sha512-YJCEq8BE3CdN8+7HPZ/4DxJjk/OkZV2FFIf+DlZTC/4iBlzYCD5yjRR6eiOS5llO11zbRltIRuKAjMKaWTE6cg==" + }, + "node_modules/@actions/exec": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@actions/exec/-/exec-1.0.4.tgz", + "integrity": "sha512-4DPChWow9yc9W3WqEbUj8Nr86xkpyE29ZzWjXucHItclLbEW6jr80Zx4nqv18QL6KK65+cifiQZXvnqgTV6oHw==", + "dependencies": { + "@actions/io": "^1.0.1" + } + }, + "node_modules/@actions/io": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@actions/io/-/io-1.0.2.tgz", + "integrity": "sha512-J8KuFqVPr3p6U8W93DOXlXW6zFvrQAJANdS+vw0YhusLIq+bszW8zmK2Fh1C2kDPX8FMvwIl1OUcFgvJoXLbAg==" + }, + "node_modules/@babel/code-frame": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.10.4.tgz", + "integrity": "sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg==", + "dependencies": { + "@babel/highlight": "^7.10.4" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.4.tgz", + "integrity": "sha512-3U9y+43hz7ZM+rzG24Qe2mufW5KhvFg/NhnNph+i9mgCtdTCtMJuI1TMkrIUiK7Ix4PYlRF9I5dhqaLYA/ADXw==" + }, + "node_modules/@babel/highlight": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.10.4.tgz", + "integrity": "sha512-i6rgnR/YgPEQzZZnbTHHuZdlE8qyoBNalD6F+q4vAFlcMEcqmkoG+mPqJYJCo63qPf74+Y1UZsl3l6f7/RIkmA==", + "dependencies": { + "@babel/helper-validator-identifier": "^7.10.4", + "chalk": "^2.0.0", + "js-tokens": "^4.0.0" + } + }, + "node_modules/@types/node": { + "version": "14.6.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-14.6.0.tgz", + "integrity": 
"sha512-mikldZQitV94akrc4sCcSjtJfsTKt4p+e/s0AGscVA6XArQ9kFclP+ZiYUMnq987rc6QlYxXv/EivqlfSLxpKA==" + }, + "node_modules/@types/q": { + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/@types/q/-/q-1.5.4.tgz", + "integrity": "sha512-1HcDas8SEj4z1Wc696tH56G8OlRaH/sqZOynNNB+HF0WOeXPaxTtbYzJY2oEfiUxjSKjhCKr+MvR7dCHcEelug==" + }, + "node_modules/acorn": { + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.1.tgz", + "integrity": "sha512-ZVA9k326Nwrj3Cj9jlh3wGFutC2ZornPNARZwsNYqQYgN0EsV2d53w5RN/co65Ohn4sUAUtb1rSUAOD6XN9idA==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.2.0.tgz", + "integrity": "sha512-HiUX/+K2YpkpJ+SzBffkM/AQ2YE03S0U1kjTLVpoJdhZMOWy8qvXVN9JdLqv2QsaQ6MPYQIuNmwD8zOiYUofLQ==", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.3", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.3.tgz", + "integrity": "sha512-4K0cK3L1hsqk9xIb2z9vs/XU+PGJZ9PNpJRDS9YLzmNdX6jmVPfamLvTJr0aDAusnHyCHO6MjzlkAsgtqp9teA==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-escapes": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-3.2.0.tgz", + "integrity": "sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/ansi-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", + "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", + "engines": { + "node": ">=4" + } + }, + "node_modules/ansi-styles": { + "version": 
"3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/archiver": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/archiver/-/archiver-5.0.0.tgz", + "integrity": "sha512-AEWhJz6Yi6hWtN1Sqy/H4sZo/lLMJ/NftXxGaDy/TnOMmmjsRaZc/Ts+U4BsPoBQkuunTN6t8hk7iU9A+HBxLw==", + "dependencies": { + "archiver-utils": "^2.1.0", + "async": "^3.2.0", + "buffer-crc32": "^0.2.1", + "readable-stream": "^3.6.0", + "readdir-glob": "^1.0.0", + "tar-stream": "^2.1.2", + "zip-stream": "^4.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/archiver-utils": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/archiver-utils/-/archiver-utils-2.1.0.tgz", + "integrity": "sha512-bEL/yUb/fNNiNTuUz979Z0Yg5L+LzLxGJz8x79lYmR54fmTIb6ob/hNQgkQnIUDWIFjZVQwl9Xs356I6BAMHfw==", + "dependencies": { + "glob": "^7.1.4", + "graceful-fs": "^4.2.0", + "lazystream": "^1.0.0", + "lodash.defaults": "^4.2.0", + "lodash.difference": "^4.5.0", + "lodash.flatten": "^4.4.0", + "lodash.isplainobject": "^4.0.6", + "lodash.union": "^4.6.0", + "normalize-path": "^3.0.0", + "readable-stream": "^2.0.0" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/archiver/node_modules/readable-stream": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": 
"sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/array-filter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/array-filter/-/array-filter-1.0.0.tgz", + "integrity": "sha1-uveeYubvTCpMC4MSMtr/7CUfnYM=" + }, + "node_modules/asn1": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", + "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", + "dependencies": { + "safer-buffer": "~2.1.0" + } + }, + "node_modules/assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/astral-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-1.0.0.tgz", + "integrity": "sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg==", + "engines": { + "node": ">=4" + } + }, + "node_modules/async": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.0.tgz", + "integrity": "sha512-TR2mEZFVOj2pLStYxLht7TyfuRzaydfpxr3k9RpHIzMgw7A64dzsdqCxH1WJyQdoe8T10nDXd9wnEigmiuHIZw==" + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" + }, + "node_modules/available-typed-arrays": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.2.tgz", + "integrity": "sha512-XWX3OX8Onv97LMk/ftVyBibpGwY5a8SmuxZPzeOxqmuEqUCOM9ZE+uIaD1VNJ5QnvU2UQusvmKbuM1FR8QWGfQ==", + "dependencies": { + "array-filter": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } 
+ }, + "node_modules/aws-sign2": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", + "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=", + "engines": { + "node": "*" + } + }, + "node_modules/aws4": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.10.0.tgz", + "integrity": "sha512-3YDiu347mtVtjpyV3u5kVqQLP242c06zwDOgpeRnybmXlYYsLbtTrUBUm8i8srONt+FWobl5aibnU1030PeeuA==" + }, + "node_modules/azure-actions-webclient": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/azure-actions-webclient/-/azure-actions-webclient-1.0.11.tgz", + "integrity": "sha512-4l0V47W3DDreTFCXEdhWr9LhmgSZecWWyKhm06XOAUodQ82x9ndFFet4OywCDcvQ1u4Gh11xPaSv62FACcgyyw==", + "dependencies": { + "@actions/core": "^1.1.3", + "@actions/exec": "^1.0.1", + "@actions/io": "^1.0.1", + "fs": "0.0.1-security", + "q": "^1.5.1", + "querystring": "^0.2.0", + "typed-rest-client": "^1.5.0", + "util": "^0.12.1" + } + }, + "node_modules/azure-storage": { + "version": "2.10.3", + "resolved": "https://registry.npmjs.org/azure-storage/-/azure-storage-2.10.3.tgz", + "integrity": "sha512-IGLs5Xj6kO8Ii90KerQrrwuJKexLgSwYC4oLWmc11mzKe7Jt2E5IVg+ZQ8K53YWZACtVTMBNO3iGuA+4ipjJxQ==", + "deprecated": "Please note: newer packages @azure/storage-blob, @azure/storage-queue and @azure/storage-file are available as of November 2019 and @azure/data-tables is available as of June 2021. While the legacy azure-storage package will continue to receive critical bug fixes, we strongly encourage you to upgrade. 
Migration guide can be found: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/storage/MigrationGuide.md", + "dependencies": { + "browserify-mime": "~1.2.9", + "extend": "^3.0.2", + "json-edm-parser": "0.1.2", + "md5.js": "1.3.4", + "readable-stream": "~2.0.0", + "request": "^2.86.0", + "underscore": "~1.8.3", + "uuid": "^3.0.0", + "validator": "~9.4.1", + "xml2js": "0.2.8", + "xmlbuilder": "^9.0.7" + }, + "engines": { + "node": ">= 0.8.26" + } + }, + "node_modules/azure-storage/node_modules/sax": { + "version": "0.5.8", + "resolved": "https://registry.npmjs.org/sax/-/sax-0.5.8.tgz", + "integrity": "sha1-1HLbIo6zMcJQaw6MFVJK25OdEsE=" + }, + "node_modules/azure-storage/node_modules/xml2js": { + "version": "0.2.8", + "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.2.8.tgz", + "integrity": "sha1-m4FpCTFjH/CdGVdUn69U9PmAs8I=", + "dependencies": { + "sax": "0.5.x" + } + }, + "node_modules/azure-storage/node_modules/xmlbuilder": { + "version": "9.0.7", + "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz", + "integrity": "sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0=", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=" + }, + "node_modules/base64-js": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.3.1.tgz", + "integrity": "sha512-mLQ4i2QO1ytvGWFWmcngKO//JXAQueZvwEKtjgQFM4jIK0kU+ytMfplL8j+n5mspOfjHwoAg+9yhb7BwAHm36g==" + }, + "node_modules/bcrypt-pbkdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", + "dependencies": { + "tweetnacl": "^0.14.3" + } + }, + "node_modules/bl": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.0.3.tgz", + "integrity": 
"sha512-fs4G6/Hu4/EE+F75J8DuN/0IpQqNjAdC7aEQv7Qt8MHGUH7Ckv2MwTEEeN9QehD0pfIDkMI1bkHYkKy7xHyKIg==", + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/bl/node_modules/readable-stream": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/block-stream": { + "version": "0.0.9", + "resolved": "https://registry.npmjs.org/block-stream/-/block-stream-0.0.9.tgz", + "integrity": "sha1-E+v+d4oDIFz+A3UUgeu0szAMEmo=", + "dependencies": { + "inherits": "~2.0.0" + }, + "engines": { + "node": "0.4 || >=0.5.8" + } + }, + "node_modules/bluebird": { + "version": "2.11.0", + "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-2.11.0.tgz", + "integrity": "sha1-U0uQM8AiyVecVro7Plpcqvu2UOE=" + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/browserify-mime": { + "version": "1.2.9", + "resolved": "https://registry.npmjs.org/browserify-mime/-/browserify-mime-1.2.9.tgz", + "integrity": "sha1-rrGvKN5sDXpqLOQK22j/GEIq8x8=" + }, + "node_modules/buffer": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.6.0.tgz", + "integrity": "sha512-/gDYp/UtU0eA1ys8bOs9J6a+E/KWIY+DZ+Q2WESNUA0jFRsJOc0SNUO6xJ5SGA1xueg3NL65W6s+NY5l9cunuw==", + "dependencies": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4" + } + }, + "node_modules/buffer-crc32": { + 
"version": "0.2.13", + "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", + "integrity": "sha1-DTM+PwDqxQqhRUq9MO+MKl2ackI=", + "engines": { + "node": "*" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/caseless": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", + "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" + }, + "node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/chardet": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", + "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==" + }, + "node_modules/cli-cursor": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-2.1.0.tgz", + "integrity": "sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU=", + "dependencies": { + "restore-cursor": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cli-width": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-2.2.1.tgz", + "integrity": "sha512-GRMWDxpOB6Dgk2E5Uo+3eEBvtOOlimMmpbFiKuLFnQzYDavtLFY3K5ona41jgN/WdRZtG7utuVSVTL4HbZHGkw==" + }, + "node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": 
"sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/commander": { + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" + }, + "node_modules/compress-commons": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/compress-commons/-/compress-commons-4.0.1.tgz", + "integrity": "sha512-xZm9o6iikekkI0GnXCmAl3LQGZj5TBDj0zLowsqi7tJtEa3FMGSEcHcqrSJIrOAk1UG/NBbDn/F1q+MG/p/EsA==", + "dependencies": { + "buffer-crc32": "^0.2.13", + "crc32-stream": "^4.0.0", + "normalize-path": "^3.0.0", + "readable-stream": "^3.6.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/compress-commons/node_modules/readable-stream": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": 
"sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" + }, + "node_modules/core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" + }, + "node_modules/crc": { + "version": "3.8.0", + "resolved": "https://registry.npmjs.org/crc/-/crc-3.8.0.tgz", + "integrity": "sha512-iX3mfgcTMIq3ZKLIsVFAbv7+Mc10kxabAGQb8HvjA1o3T1PIYprbakQ65d3I+2HGHt6nSKkM9PYjgoJO2KcFBQ==", + "dependencies": { + "buffer": "^5.1.0" + } + }, + "node_modules/crc32-stream": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/crc32-stream/-/crc32-stream-4.0.0.tgz", + "integrity": "sha512-tyMw2IeUX6t9jhgXI6um0eKfWq4EIDpfv5m7GX4Jzp7eVelQ360xd8EPXJhp2mHwLQIkqlnMLjzqSZI3a+0wRw==", + "dependencies": { + "crc": "^3.4.4", + "readable-stream": "^3.4.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/crc32-stream/node_modules/readable-stream": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/cross-spawn": { + "version": "6.0.5", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz", + "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==", + "dependencies": { + "nice-try": "^1.0.4", + "path-key": "^2.0.1", + "semver": "^5.5.0", + "shebang-command": "^1.2.0", + "which": "^1.2.9" + }, + "engines": { + "node": ">=4.8" + } + }, + "node_modules/cross-spawn/node_modules/semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": 
"sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", + "dependencies": { + "assert-plus": "^1.0.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/debug": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz", + "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==", + "deprecated": "Debug versions >=3.2.0 <3.2.7 || >=4 <4.3.1 have a low-severity ReDos regression when used in a Node.js environment. It is recommended you upgrade to 3.2.7 or 4.3.1. (https://github.com/visionmedia/debug/issues/797)", + "dependencies": { + "ms": "^2.1.1" + } + }, + "node_modules/deep-is": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", + "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=" + }, + "node_modules/define-properties": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", + "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", + "dependencies": { + "object-keys": "^1.0.12" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dependencies": { + "esutils": "^2.0.2" + }, + 
"engines": { + "node": ">=6.0.0" + } + }, + "node_modules/ecc-jsbn": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", + "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", + "dependencies": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, + "node_modules/emoji-regex": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", + "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" + }, + "node_modules/end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "dependencies": { + "once": "^1.4.0" + } + }, + "node_modules/es-abstract": { + "version": "1.17.6", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.6.tgz", + "integrity": "sha512-Fr89bON3WFyUi5EvAeI48QTWX0AyekGgLA8H+c+7fbfCkJwRWRMLd8CQedNEyJuoYYhmtEqY92pgte1FAhBlhw==", + "dependencies": { + "es-to-primitive": "^1.2.1", + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.1", + "is-callable": "^1.2.0", + "is-regex": "^1.1.0", + "object-inspect": "^1.7.0", + "object-keys": "^1.1.1", + "object.assign": "^4.1.0", + "string.prototype.trimend": "^1.0.1", + "string.prototype.trimstart": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-to-primitive": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", + "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", + "dependencies": { + "is-callable": "^1.1.4", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, + "node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/eslint": { + "version": "5.16.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-5.16.0.tgz", + "integrity": "sha512-S3Rz11i7c8AA5JPv7xAH+dOyq/Cu/VXHiHXBPOU1k/JAM5dXqQPt3qcrhpHSorXmrpu2g0gkIBVXAqCpzfoZIg==", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "ajv": "^6.9.1", + "chalk": "^2.1.0", + "cross-spawn": "^6.0.5", + "debug": "^4.0.1", + "doctrine": "^3.0.0", + "eslint-scope": "^4.0.3", + "eslint-utils": "^1.3.1", + "eslint-visitor-keys": "^1.0.0", + "espree": "^5.0.1", + "esquery": "^1.0.1", + "esutils": "^2.0.2", + "file-entry-cache": "^5.0.1", + "functional-red-black-tree": "^1.0.1", + "glob": "^7.1.2", + "globals": "^11.7.0", + "ignore": "^4.0.6", + "import-fresh": "^3.0.0", + "imurmurhash": "^0.1.4", + "inquirer": "^6.2.2", + "js-yaml": "^3.13.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.3.0", + "lodash": "^4.17.11", + "minimatch": "^3.0.4", + "mkdirp": "^0.5.1", + "natural-compare": "^1.4.0", + "optionator": "^0.8.2", + "path-is-inside": "^1.0.2", + "progress": "^2.0.0", + "regexpp": "^2.0.1", + "semver": "^5.5.1", + "strip-ansi": "^4.0.0", + "strip-json-comments": "^2.0.1", + "table": "^5.2.3", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^6.14.0 || ^8.10.0 || >=9.10.0" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz", + "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/eslint/node_modules/eslint-scope": { + 
"version": "4.0.3", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-4.0.3.tgz", + "integrity": "sha512-p7VutNr1O/QrxysMo3E45FjYDTeXBy0iTltPFNSqKAIfjDSXC+4dj+qfyuD8bfAXrW/y6lW3O76VaYNPKfpKrg==", + "dependencies": { + "esrecurse": "^4.1.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/eslint/node_modules/eslint-utils": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.4.3.tgz", + "integrity": "sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q==", + "dependencies": { + "eslint-visitor-keys": "^1.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/eslint/node_modules/semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/espree": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-5.0.1.tgz", + "integrity": "sha512-qWAZcWh4XE/RwzLJejfcofscgMc9CamR6Tn1+XRXNzrvUSSbiAjGOI/fggztjIi7y9VLPqnICMIPiGyr8JaZ0A==", + "dependencies": { + "acorn": "^6.0.7", + "acorn-jsx": "^5.0.0", + "eslint-visitor-keys": "^1.0.0" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/esquery": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.3.1.tgz", + "integrity": "sha512-olpvt9QG0vniUBZspVRN6lwB7hOZoTRtT+jzR+tS4ffYx2mzbw+z0XCOk44aaLYKApNX5nMm+E+P6o25ip/DHQ==", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esquery/node_modules/estraverse": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.1.0.tgz", + "integrity": "sha512-FyohXK+R0vE+y1nHLoBM7ZTyqRpqAlhdZHCWIWEviFLiGB8b04H6bQs8G+XTthacvT8VuwvteiP7RJSxMs8UEw==", + "engines": { + 
"node": ">=4.0" + } + }, + "node_modules/esrecurse": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.2.1.tgz", + "integrity": "sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ==", + "dependencies": { + "estraverse": "^4.1.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "node_modules/external-editor": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", + "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", + "dependencies": { + "chardet": "^0.7.0", + "iconv-lite": "^0.4.24", + "tmp": "^0.0.33" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/extsprintf": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", + "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=", + "engines": [ + "node >=0.6.0" + ] + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": 
"sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=" + }, + "node_modules/fd-slicer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", + "integrity": "sha1-JcfInLH5B3+IkbvmHY85Dq4lbx4=", + "dependencies": { + "pend": "~1.2.0" + } + }, + "node_modules/figures": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-2.0.0.tgz", + "integrity": "sha1-OrGi0qYsi/tDGgyUy3l6L84nyWI=", + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/file-entry-cache": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-5.0.1.tgz", + "integrity": "sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g==", + "dependencies": { + "flat-cache": "^2.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/flat-cache": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-2.0.1.tgz", + "integrity": "sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA==", + "dependencies": { + "flatted": "^2.0.0", + "rimraf": "2.6.3", + "write": "1.0.3" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/flatted": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz", + "integrity": 
"sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==" + }, + "node_modules/foreach": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/foreach/-/foreach-2.0.5.tgz", + "integrity": "sha1-C+4AUBiusmDQo6865ljdATbsG5k=" + }, + "node_modules/forever-agent": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", + "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=", + "engines": { + "node": "*" + } + }, + "node_modules/form-data": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", + "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.6", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 0.12" + } + }, + "node_modules/fs": { + "version": "0.0.1-security", + "resolved": "https://registry.npmjs.org/fs/-/fs-0.0.1-security.tgz", + "integrity": "sha1-invTcYa23d84E/I4WLV+yq9eQdQ=" + }, + "node_modules/fs-constants": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", + "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" + }, + "node_modules/fstream": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/fstream/-/fstream-1.0.12.tgz", + "integrity": "sha512-WvJ193OHa0GHPEL+AycEJgxvBEwyfRkN1vhjca23OaPVMCaLCXTd5qAu82AjTcgP1UJmytkOKb63Ypde7raDIg==", + "dependencies": { + "graceful-fs": "^4.1.2", + "inherits": "~2.0.0", + "mkdirp": ">=0.5 0", + "rimraf": "2" + }, + "engines": { + "node": ">=0.6" + } + }, + "node_modules/function-bind": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + }, + "node_modules/functional-red-black-tree": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz", + "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=" + }, + "node_modules/getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", + "dependencies": { + "assert-plus": "^1.0.0" + } + }, + "node_modules/glob": { + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.4", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.4.tgz", + "integrity": "sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw==" + }, + "node_modules/har-schema": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", + "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=", + "engines": { + "node": ">=4" + } + }, + "node_modules/har-validator": { + "version": "5.1.5", + "resolved": 
"https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", + "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", + "deprecated": "this library is no longer supported", + "dependencies": { + "ajv": "^6.12.3", + "har-schema": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/has": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "dependencies": { + "function-bind": "^1.1.1" + }, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", + "engines": { + "node": ">=4" + } + }, + "node_modules/has-symbols": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.1.tgz", + "integrity": "sha512-PLcsoqu++dmEIZB+6totNFKq/7Do+Z0u4oT0zKOJNl3lYK6vGwwu2hjHs+68OEZbTjiUE9bgOABXbP/GvrS0Kg==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hash-base": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz", + "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==", + "dependencies": { + "inherits": "^2.0.4", + "readable-stream": "^3.6.0", + "safe-buffer": "^5.2.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/hash-base/node_modules/readable-stream": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + 
"util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/http-signature": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", + "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", + "dependencies": { + "assert-plus": "^1.0.0", + "jsprim": "^1.2.2", + "sshpk": "^1.7.0" + }, + "engines": { + "node": ">=0.8", + "npm": ">=1.3.7" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ieee754": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", + "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==" + }, + "node_modules/ignore": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz", + "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/immediate": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.0.6.tgz", + "integrity": "sha1-nbHb0Pr43m++D13V5Wu2BigN5ps=" + }, + "node_modules/import-fresh": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.1.tgz", + "integrity": "sha512-6e1q1cnWP2RXD9/keSkxHScg508CdXqXWgWBaETNhyuBFz+kUZlKboh+ISK+bU++DmbHimVBrOz/zzPe0sZ3sQ==", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": 
"sha1-khi5srkoojixPcT7a21XbyMUU+o=", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/inquirer": { + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-6.5.2.tgz", + "integrity": "sha512-cntlB5ghuB0iuO65Ovoi8ogLHiWGs/5yNrtUcKjFhSSiVeAIVpD7koaSU9RM8mpXw5YDi9RdYXGQMaOURB7ycQ==", + "dependencies": { + "ansi-escapes": "^3.2.0", + "chalk": "^2.4.2", + "cli-cursor": "^2.1.0", + "cli-width": "^2.0.0", + "external-editor": "^3.0.3", + "figures": "^2.0.0", + "lodash": "^4.17.12", + "mute-stream": "0.0.7", + "run-async": "^2.2.0", + "rxjs": "^6.4.0", + "string-width": "^2.1.0", + "strip-ansi": "^5.1.0", + "through": "^2.3.6" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/inquirer/node_modules/ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/inquirer/node_modules/strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dependencies": { + "ansi-regex": "^4.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/is-arguments": { + "version": "1.0.4", + "resolved": 
"https://registry.npmjs.org/is-arguments/-/is-arguments-1.0.4.tgz", + "integrity": "sha512-xPh0Rmt8NE65sNzvyUmWgI1tz3mKq74lGA0mL8LYZcoIzKOzDh6HmrYm3d18k60nHerC8A9Km8kYu87zfSFnLA==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/is-callable": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.0.tgz", + "integrity": "sha512-pyVD9AaGLxtg6srb2Ng6ynWJqkHU9bEM087AKck0w8QwDarTfNcpIYoU8x8Hv2Icm8u6kFJM18Dag8lyqGkviw==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-date-object": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.2.tgz", + "integrity": "sha512-USlDT524woQ08aoZFzh3/Z6ch9Y/EWXEHQ/AaRN0SkKq4t2Jw2R2339tSXmwuVoY7LLlBCbOIlx2myP/L5zk0g==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", + "engines": { + "node": ">=4" + } + }, + "node_modules/is-generator-function": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.7.tgz", + "integrity": "sha512-YZc5EwyO4f2kWCax7oegfuSr9mFz1ZvieNYBEjmukLxgXfBUbxAWGVF7GZf0zidYtoBl3WvC07YK0wT76a+Rtw==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/is-regex": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.0.tgz", + "integrity": "sha512-iI97M8KTWID2la5uYXlkbSDQIg4F6o1sYboZKKTDpnDQMLtUL86zxhgDet3Q2SriaYsyGqZ6Mn2SjbRKeLHdqw==", + "dependencies": { + "has-symbols": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-symbol": { + "version": "1.0.3", + "resolved": 
"https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.3.tgz", + "integrity": "sha512-OwijhaRSgqvhm/0ZdAcXNZt9lYdKFpcRDT5ULUuYXPoT794UNOdU+gpT6Rzo7b4V2HUl/op6GqY894AZwv9faQ==", + "dependencies": { + "has-symbols": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.3.tgz", + "integrity": "sha512-BSYUBOK/HJibQ30wWkWold5txYwMUXQct9YHAQJr8fSwvZoiglcqB0pd7vEN23+Tsi9IUEjztdOSzl4qLVYGTQ==", + "dependencies": { + "available-typed-arrays": "^1.0.0", + "es-abstract": "^1.17.4", + "foreach": "^2.0.5", + "has-symbols": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" + }, + "node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" + }, + "node_modules/isstream": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", + "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "node_modules/js-yaml": { + "version": "3.14.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.0.tgz", + "integrity": 
"sha512-/4IbIeHcD9VMHFqDR/gQ7EdZdLimOvW2DdcxFjdyyZ9NsbS+ccrXqVWDtab/lRl5AlUqmpBx8EhPaWR+OtY17A==", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/js-yaml/node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=" + }, + "node_modules/json-edm-parser": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/json-edm-parser/-/json-edm-parser-0.1.2.tgz", + "integrity": "sha1-HmCw/vG8CvZ7wNFG393lSGzWFbQ=", + "dependencies": { + "jsonparse": "~1.2.0" + } + }, + "node_modules/json-schema": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", + "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=" + }, + "node_modules/json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" + }, + "node_modules/jsonparse": 
{ + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.2.0.tgz", + "integrity": "sha1-XAxWhRBxYOcv50ib3eoLRMK8Z70=", + "engines": [ + "node >= 0.2.0" + ] + }, + "node_modules/jsprim": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", + "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", + "engines": [ + "node >=0.6.0" + ], + "dependencies": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.2.3", + "verror": "1.10.0" + } + }, + "node_modules/jszip": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/jszip/-/jszip-3.5.0.tgz", + "integrity": "sha512-WRtu7TPCmYePR1nazfrtuF216cIVon/3GWOvHS9QR5bIwSbnxtdpma6un3jyGGNhHsKCSzn5Ypk+EkDRvTGiFA==", + "dependencies": { + "lie": "~3.3.0", + "pako": "~1.0.2", + "readable-stream": "~2.3.6", + "set-immediate-shim": "~1.0.1" + } + }, + "node_modules/jszip/node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, + "node_modules/jszip/node_modules/readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/jszip/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + 
"node_modules/jszip/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/lazystream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/lazystream/-/lazystream-1.0.0.tgz", + "integrity": "sha1-9plf4PggOS9hOWvolGJAe7dxaOQ=", + "dependencies": { + "readable-stream": "^2.0.5" + }, + "engines": { + "node": ">= 0.6.3" + } + }, + "node_modules/levn": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", + "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", + "dependencies": { + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lie": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/lie/-/lie-3.3.0.tgz", + "integrity": "sha512-UaiMJzeWRlEujzAuw5LokY1L5ecNQYZKfmyZ9L7wDHb/p5etKaxXhohBcrw0EYby+G/NA52vRSN4N39dxHAIwQ==", + "dependencies": { + "immediate": "~3.0.5" + } + }, + "node_modules/lodash": { + "version": "4.17.19", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.19.tgz", + "integrity": "sha512-JNvd8XER9GQX0v2qJgsaN/mzFCNA5BRe/j8JN9d+tWyGLSodKQHKFicdwNYzWwI3wjRnaKPsGj1XkBjx/F96DQ==" + }, + "node_modules/lodash.defaults": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/lodash.defaults/-/lodash.defaults-4.2.0.tgz", + "integrity": "sha1-0JF4cW/+pN3p5ft7N/bwgCJ0WAw=" + }, + "node_modules/lodash.difference": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/lodash.difference/-/lodash.difference-4.5.0.tgz", + "integrity": "sha1-nMtOUF1Ia5FlE0V3KIWi3yf9AXw=" + }, + "node_modules/lodash.flatten": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/lodash.flatten/-/lodash.flatten-4.4.0.tgz", + "integrity": 
"sha1-8xwiIlqWMtK7+OSt2+8kCqdlph8=" + }, + "node_modules/lodash.isplainobject": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", + "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs=" + }, + "node_modules/lodash.union": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/lodash.union/-/lodash.union-4.6.0.tgz", + "integrity": "sha1-SLtQiECfFvGCFmZkHETdGqrjzYg=" + }, + "node_modules/md5.js": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.4.tgz", + "integrity": "sha1-6b296UogpawYsENA/Fdk1bCdkB0=", + "dependencies": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1" + } + }, + "node_modules/mime-db": { + "version": "1.44.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.44.0.tgz", + "integrity": "sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.27", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.27.tgz", + "integrity": "sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w==", + "dependencies": { + "mime-db": "1.44.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz", + "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==", + "engines": { + "node": ">=4" + } + }, + "node_modules/minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.5", + "resolved": 
"https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" + }, + "node_modules/mkdirp": { + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", + "dependencies": { + "minimist": "^1.2.5" + }, + "bin": { + "mkdirp": "bin/cmd.js" + } + }, + "node_modules/mout": { + "version": "0.11.1", + "resolved": "https://registry.npmjs.org/mout/-/mout-0.11.1.tgz", + "integrity": "sha1-ujYR318OWx/7/QEWa48C0fX6K5k=" + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/mute-stream": { + "version": "0.0.7", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.7.tgz", + "integrity": "sha1-MHXOk7whuPq0PhvE2n6BFe0ee6s=" + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=" + }, + "node_modules/nice-try": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz", + "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/oauth-sign": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", + "integrity": 
"sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==", + "engines": { + "node": "*" + } + }, + "node_modules/object-inspect": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.8.0.tgz", + "integrity": "sha512-jLdtEOB112fORuypAyl/50VRVIBIdVQOSUUGQHzJ4xBSbit81zRarz7GThkEFZy1RceYrWYcPcBFPQwHyAc1gA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.0.tgz", + "integrity": "sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w==", + "dependencies": { + "define-properties": "^1.1.2", + "function-bind": "^1.1.1", + "has-symbols": "^1.0.0", + "object-keys": "^1.0.11" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-2.0.1.tgz", + "integrity": "sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ=", + "dependencies": { + "mimic-fn": "^1.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/optionator": { + "version": "0.8.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", + "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", + "dependencies": { + "deep-is": "~0.1.3", + "fast-levenshtein": "~2.0.6", + "levn": 
"~0.3.0", + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2", + "word-wrap": "~1.2.3" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pako": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", + "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==" + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-is-inside": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", + "integrity": "sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM=" + }, + "node_modules/path-key": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", + "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", + "engines": { + "node": ">=4" + } + }, + "node_modules/pend": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", + "integrity": "sha1-elfrVQpng/kRUzH89GY9XI4AelA=" + }, + "node_modules/performance-now": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", + "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" + }, + "node_modules/prelude-ls": 
{ + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", + "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/process-nextick-args": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-1.0.7.tgz", + "integrity": "sha1-FQ4gt1ZZCtP5EJPyWk8q2L/zC6M=" + }, + "node_modules/progress": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz", + "integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/psl": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", + "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==" + }, + "node_modules/punycode": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", + "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", + "engines": { + "node": ">=6" + } + }, + "node_modules/q": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", + "integrity": "sha1-fjL3W0E4EpHQRhHxvxQQmsAGUdc=", + "engines": { + "node": ">=0.6.0", + "teleport": ">=0.2.0" + } + }, + "node_modules/qs": { + "version": "6.9.4", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.9.4.tgz", + "integrity": "sha512-A1kFqHekCTM7cz0udomYUoYNWjBebHm/5wzU/XqrBRBNWectVH0QIiN+NEcZ0Dte5hvzHwbr8+XQmguPhJ6WdQ==", + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/querystring": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", + "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=", + "deprecated": "The querystring API is considered 
Legacy. new code should use the URLSearchParams API instead.", + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/readable-stream": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.0.6.tgz", + "integrity": "sha1-j5A0HmilPMySh4jaz80Rs265t44=", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "~1.0.0", + "process-nextick-args": "~1.0.6", + "string_decoder": "~0.10.x", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/readable-stream/node_modules/string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=" + }, + "node_modules/readdir-glob": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/readdir-glob/-/readdir-glob-1.0.0.tgz", + "integrity": "sha512-km0DIcwQVZ1ZUhXhMWpF74/Wm5aFEd5/jDiVWF1Hkw2myPQovG8vCQ8+FQO2KXE9npQQvCnAMZhhWuUee4WcCQ==", + "dependencies": { + "minimatch": "^3.0.4" + } + }, + "node_modules/regexpp": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.1.tgz", + "integrity": "sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==", + "engines": { + "node": ">=6.5.0" + } + }, + "node_modules/request": { + "version": "2.88.2", + "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", + "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", + "deprecated": "request has been deprecated, see https://github.com/request/request/issues/3142", + "dependencies": { + "aws-sign2": "~0.7.0", + "aws4": "^1.8.0", + "caseless": "~0.12.0", + "combined-stream": "~1.0.6", + "extend": "~3.0.2", + "forever-agent": "~0.6.1", + "form-data": "~2.3.2", + "har-validator": "~5.1.3", + "http-signature": "~1.2.0", + "is-typedarray": "~1.0.0", + "isstream": "~0.1.2", + "json-stringify-safe": 
"~5.0.1", + "mime-types": "~2.1.19", + "oauth-sign": "~0.9.0", + "performance-now": "^2.1.0", + "qs": "~6.5.2", + "safe-buffer": "^5.1.2", + "tough-cookie": "~2.5.0", + "tunnel-agent": "^0.6.0", + "uuid": "^3.3.2" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/request/node_modules/qs": { + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", + "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "engines": { + "node": ">=4" + } + }, + "node_modules/restore-cursor": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-2.0.0.tgz", + "integrity": "sha1-n37ih/gv0ybU/RYpI9YhKe7g368=", + "dependencies": { + "onetime": "^2.0.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/rimraf": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz", + "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, + "node_modules/run-async": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz", + "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/rxjs": { + "version": "6.6.2", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.2.tgz", + "integrity": "sha512-BHdBMVoWC2sL26w//BCu3YzKT4s2jip/WhwsGEDmeKYBhKDZeYezVUnHatYB7L85v5xs0BAQmg6BEYJEKxBabg==", + "dependencies": { + 
"tslib": "^1.9.0" + }, + "engines": { + "npm": ">=2.0.0" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/set-immediate-shim": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/set-immediate-shim/-/set-immediate-shim-1.0.1.tgz", + "integrity": "sha1-SysbJ+uAip+NzEgaWOXlb1mfP2E=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/shebang-command": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", + "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", + "dependencies": { + "shebang-regex": "^1.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/shebang-regex": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", + "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/signal-exit": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", + "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==" + }, + "node_modules/slice-ansi": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-2.1.0.tgz", + "integrity": 
"sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ==", + "dependencies": { + "ansi-styles": "^3.2.0", + "astral-regex": "^1.0.0", + "is-fullwidth-code-point": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" + }, + "node_modules/sshpk": { + "version": "1.16.1", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", + "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", + "dependencies": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + "dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + }, + "bin": { + "sshpk-conv": "bin/sshpk-conv", + "sshpk-sign": "bin/sshpk-sign", + "sshpk-verify": "bin/sshpk-verify" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", + "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", + "dependencies": { + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^4.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/string.prototype.trimend": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.1.tgz", + "integrity": 
"sha512-LRPxFUaTtpqYsTeNKaFOw3R4bxIzWOnbQ837QfBylo8jIxtcbK/A/sMV7Q+OAV/vWo+7s25pOE10KYSjaSO06g==", + "dependencies": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.5" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/string.prototype.trimstart": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.1.tgz", + "integrity": "sha512-XxZn+QpvrBI1FOcg6dIpxUPgWCPuNXvMD72aaRaUQv1eD4e/Qy8i/hFTe0BUmD60p/QA6bh1avmuPTfNjqVWRw==", + "dependencies": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.5" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/strip-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", + "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", + "dependencies": { + "ansi-regex": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/table": { + "version": "5.4.6", + "resolved": "https://registry.npmjs.org/table/-/table-5.4.6.tgz", + "integrity": "sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug==", + "dependencies": { + "ajv": "^6.10.2", + "lodash": "^4.17.14", + "slice-ansi": "^2.1.0", + "string-width": "^3.0.0" + }, + "engines": { + "node": ">=6.0.0" + } + }, + 
"node_modules/table/node_modules/ansi-regex": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", + "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "engines": { + "node": ">=6" + } + }, + "node_modules/table/node_modules/string-width": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", + "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==", + "dependencies": { + "emoji-regex": "^7.0.1", + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^5.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/table/node_modules/strip-ansi": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz", + "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==", + "dependencies": { + "ansi-regex": "^4.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/tar": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/tar/-/tar-2.2.2.tgz", + "integrity": "sha512-FCEhQ/4rE1zYv9rYXJw/msRqsnmlje5jHP6huWeBZ704jUTy02c5AZyWujpMR1ax6mVw9NyJMfuK2CMDWVIfgA==", + "deprecated": "This version of tar is no longer supported, and will not receive security updates. 
Please upgrade asap.", + "dependencies": { + "block-stream": "*", + "fstream": "^1.0.12", + "inherits": "2" + } + }, + "node_modules/tar-stream": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.1.3.tgz", + "integrity": "sha512-Z9yri56Dih8IaK8gncVPx4Wqt86NDmQTSh49XLZgjWpGZL9GK9HKParS2scqHCC4w6X9Gh2jwaU45V47XTKwVA==", + "dependencies": { + "bl": "^4.0.1", + "end-of-stream": "^1.4.1", + "fs-constants": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.1.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/tar-stream/node_modules/readable-stream": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/tar.gz": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/tar.gz/-/tar.gz-1.0.7.tgz", + "integrity": "sha512-uhGatJvds/3diZrETqMj4RxBR779LKlIE74SsMcn5JProZsfs9j0QBwWO1RW+IWNJxS2x8Zzra1+AW6OQHWphg==", + "deprecated": "⚠️ WARNING ⚠️ tar.gz module has been deprecated and your application is vulnerable. 
Please use tar module instead: https://npmjs.com/tar", + "dependencies": { + "bluebird": "^2.9.34", + "commander": "^2.8.1", + "fstream": "^1.0.8", + "mout": "^0.11.0", + "tar": "^2.1.1" + }, + "bin": { + "targz": "bin/targz" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=" + }, + "node_modules/through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=" + }, + "node_modules/tmp": { + "version": "0.0.33", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", + "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "dependencies": { + "os-tmpdir": "~1.0.2" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/tough-cookie": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", + "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", + "dependencies": { + "psl": "^1.1.28", + "punycode": "^2.1.1" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/tslib": { + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.13.0.tgz", + "integrity": "sha512-i/6DQjL8Xf3be4K/E6Wgpekn5Qasl1usyw++dAA35Ue5orEn65VIxOA+YvNNl9HV3qv70T7CNwjODHZrLwvd1Q==" + }, + "node_modules/tunnel": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/tunnel/-/tunnel-0.0.6.tgz", + "integrity": "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==", + "engines": { + "node": ">=0.6.11 <=0.7.0 || >=0.7.3" + } + }, + "node_modules/tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": 
"sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", + "dependencies": { + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": "*" + } + }, + "node_modules/tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" + }, + "node_modules/type-check": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", + "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", + "dependencies": { + "prelude-ls": "~1.1.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/typed-rest-client": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/typed-rest-client/-/typed-rest-client-1.7.3.tgz", + "integrity": "sha512-CwTpx/TkRHGZoHkJhBcp4X8K3/WtlzSHVQR0OIFnt10j4tgy4ypgq/SrrgVpA1s6tAL49Q6J3R5C0Cgfh2ddqA==", + "dependencies": { + "qs": "^6.9.1", + "tunnel": "0.0.6", + "underscore": "1.8.3" + } + }, + "node_modules/typescript": { + "version": "3.9.7", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.9.7.tgz", + "integrity": "sha512-BLbiRkiBzAwsjut4x/dsibSTB6yWpwT5qWmC2OfuCg3GgVQCSgMs4vEctYPhsaGtd0AeuuHMkjZ2h2WG8MSzRw==", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=4.2.0" + } + }, + "node_modules/underscore": { + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.8.3.tgz", + "integrity": "sha1-Tz+1OxBuYJf8+ctBCfKl6b36UCI=" + }, + "node_modules/uri-js": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz", + "integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/util": { + "version": "0.12.3", + "resolved": "https://registry.npmjs.org/util/-/util-0.12.3.tgz", + "integrity": 
"sha512-I8XkoQwE+fPQEhy9v012V+TSdH2kp9ts29i20TaaDUXsg7x/onePbhFJUExBfv/2ay1ZOp/Vsm3nDlmnFGSAog==", + "dependencies": { + "inherits": "^2.0.3", + "is-arguments": "^1.0.4", + "is-generator-function": "^1.0.7", + "is-typed-array": "^1.1.3", + "safe-buffer": "^5.1.2", + "which-typed-array": "^1.1.2" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" + }, + "node_modules/uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", + "deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. See https://v8.dev/blog/math-random for details.", + "bin": { + "uuid": "bin/uuid" + } + }, + "node_modules/validator": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/validator/-/validator-9.4.1.tgz", + "integrity": "sha512-YV5KjzvRmSyJ1ee/Dm5UED0G+1L4GZnLN3w6/T+zZm8scVua4sOhYKWTUrKa0H/tMiJyO9QLHMPN+9mB/aMunA==", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/verror": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", + "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", + "engines": [ + "node >=0.6.0" + ], + "dependencies": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, + "node_modules/which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "which": "bin/which" + } + }, + "node_modules/which-typed-array": { + "version": "1.1.2", + "resolved": 
"https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.2.tgz", + "integrity": "sha512-KT6okrd1tE6JdZAy3o2VhMoYPh3+J6EMZLyrxBQsZflI1QCZIxMrIYLkosd8Twf+YfknVIHmYQPgJt238p8dnQ==", + "dependencies": { + "available-typed-arrays": "^1.0.2", + "es-abstract": "^1.17.5", + "foreach": "^2.0.5", + "function-bind": "^1.1.1", + "has-symbols": "^1.0.1", + "is-typed-array": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/word-wrap": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", + "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" + }, + "node_modules/write": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/write/-/write-1.0.3.tgz", + "integrity": "sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig==", + "dependencies": { + "mkdirp": "^0.5.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/yauzl": { + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", + "integrity": "sha1-x+sXyT4RLLEIb6bY5R+wZnt5pfk=", + "dependencies": { + "buffer-crc32": "~0.2.3", + "fd-slicer": "~1.1.0" + } + }, + "node_modules/yazl": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/yazl/-/yazl-2.5.1.tgz", + "integrity": "sha512-phENi2PLiHnHb6QBVot+dJnaAZ0xosj7p3fWl+znIjBDlnMI2PsZCJZ306BPTFOaHf5qdDEI8x5qFrSOBN5vrw==", + "dependencies": { + "buffer-crc32": "~0.2.3" + } + }, + "node_modules/zip-lib": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/zip-lib/-/zip-lib-0.7.1.tgz", + "integrity": 
"sha512-Ld/ERTsce14KkMZ7indAVrHDd+jYWPWEzsJZI51cRAAMsGli8xfdsnz45PsHCBWXCQbovYER+1xKVicJs2tO6Q==", + "dependencies": { + "yauzl": "^2.10.0", + "yazl": "^2.5.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/zip-stream": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/zip-stream/-/zip-stream-4.0.2.tgz", + "integrity": "sha512-TGxB2g+1ur6MHkvM644DuZr8Uzyz0k0OYWtS3YlpfWBEmK4woaC2t3+pozEL3dBfIPmpgmClR5B2QRcMgGt22g==", + "dependencies": { + "archiver-utils": "^2.1.0", + "compress-commons": "^4.0.0", + "readable-stream": "^3.6.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/zip-stream/node_modules/readable-stream": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + } + }, "dependencies": { "@actions/core": { "version": "1.2.4", @@ -63,7 +2546,8 @@ "acorn-jsx": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.2.0.tgz", - "integrity": "sha512-HiUX/+K2YpkpJ+SzBffkM/AQ2YE03S0U1kjTLVpoJdhZMOWy8qvXVN9JdLqv2QsaQ6MPYQIuNmwD8zOiYUofLQ==" + "integrity": "sha512-HiUX/+K2YpkpJ+SzBffkM/AQ2YE03S0U1kjTLVpoJdhZMOWy8qvXVN9JdLqv2QsaQ6MPYQIuNmwD8zOiYUofLQ==", + "requires": {} }, "ajv": { "version": "6.12.3", @@ -1622,6 +4106,14 @@ "tweetnacl": "~0.14.0" } }, + "string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "requires": { + "safe-buffer": "~5.2.0" + } + }, "string-width": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", @@ -1649,14 +4141,6 @@ 
"es-abstract": "^1.17.5" } }, - "string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "requires": { - "safe-buffer": "~5.2.0" - } - }, "strip-ansi": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", diff --git a/src/AzureImageBuilderClient.ts b/src/AzureImageBuilderClient.ts index 12139c1c..5702394e 100644 --- a/src/AzureImageBuilderClient.ts +++ b/src/AzureImageBuilderClient.ts @@ -60,8 +60,30 @@ export default class ImageBuilderClient { throw Error(`Submit template call failed for template ${templateName} with error: ${JSON.stringify(error)}`); } } + public async getRunTemplate(templateName: string, subscriptionId: string){ + try { + let httpRequest = { + method: 'GET', + uri: this._client.getRequestUri(`/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imagetemplates/{imageTemplateName}`, { '{subscriptionId}': subscriptionId, '{resourceGroupName}': this._taskParameters.resourceGroupName, '{imageTemplateName}': templateName }, [], apiVersion) + }; + var response = await this._client.beginRequest(httpRequest); - public async runTemplate(templateName: string, subscriptionId: string, timeOutInMinutes: number) { + if (response.statusCode == 202) { + response = await this.getLongRunningOperationResult(response); + } + if (response.statusCode != 200 || response.body.status == "Failed") { + throw ToError(response); + } + if (response.statusCode == 200 && response.body && response.body.status == "Succeeded") { + console.log("Run template: \n", response.body.status); + } + return response + } + catch (error) { + throw Error(`Post template call failed for template ${templateName} with error: ${JSON.stringify(error)}`); + } + } + public async runTemplate(templateName: string, 
subscriptionId: string,timeOutInMinutes: number) { try { console.log("Starting run template..."); let httpRequest: WebRequest = { @@ -70,8 +92,13 @@ export default class ImageBuilderClient { }; var response = await this._client.beginRequest(httpRequest); + if (response.statusCode == 202) { - response = await this.getLongRunningOperationResult(response, timeOutInMinutes); + if (this._taskParameters.actionRunMode == "nowait"){ + console.log("Action Run Mode set to NoWait. Skipping wait\n"); + return + } + response = await this.getLongRunningOperationResult(response, timeOutInMinutes, templateName, subscriptionId); } if (response.statusCode != 200 || response.body.status == "Failed") { throw ToError(response); @@ -135,7 +162,7 @@ export default class ImageBuilderClient { return output; } - public async getLongRunningOperationResult(response: WebResponse, timeoutInMinutes?: number): Promise { + public async getLongRunningOperationResult(response: WebResponse, timeoutInMinutes?: number, templateName: string, subscriptionId: string): Promise { var longRunningOperationRetryTimeout = !!timeoutInMinutes ? 
timeoutInMinutes : 0; timeoutInMinutes = timeoutInMinutes || longRunningOperationRetryTimeout; var timeout = new Date().getTime() + timeoutInMinutes * 60 * 1000; @@ -161,6 +188,27 @@ export default class ImageBuilderClient { if (!waitIndefinitely && timeout < new Date().getTime()) { throw Error(`error in url`); } + if (this._taskParameters.actionRunMode == "distro" && templateName && subscriptionId){ + try{ + let runTemplate_result = null + try{ + runTemplate_result = await this.getRunTemplate(templateName, subscriptionId).then(result=> (runTemplate_result = result)) + + if (!runTemplate_result.body.properties && !runTemplate_result.body.properties.lastRunStatus){ + if (runTemplate_result.properties.lastRunStatus.runSubState.toLowerCase() == "distributing"){ + console.log("Template is distributing set to break") + return runTemplate_result + } + } + } + catch(err){ + console.log(err) + } + } + catch(err){ + console.log(err) + } + } var sleepDuration = 15; await this.sleepFor(sleepDuration); } else { diff --git a/src/ImageBuilder.ts b/src/ImageBuilder.ts index b625feb3..ddddb392 100644 --- a/src/ImageBuilder.ts +++ b/src/ImageBuilder.ts @@ -78,7 +78,10 @@ export default class ImageBuilder { else { var template = JSON.parse(this._taskParameters.templateJsonFromUser); this._taskParameters.location = template.location; - } + } + + this.templateName = this.getTemplateName(); + var runOutputName = this.getRunoutputName(); console.log("Using Managed Identity " + this.idenityName); var blobUrl = ""; @@ -116,18 +119,28 @@ export default class ImageBuilder { await this._aibClient.putImageTemplate(templateStr, this.templateName, subscriptionId); this.imgBuilderTemplateExists = true; - await this._aibClient.runTemplate(this.templateName, subscriptionId, this._taskParameters.buildTimeoutInMinutes); - var out = await this._aibClient.getRunOutput(this.templateName, runOutputName, subscriptionId); + await this._aibClient.runTemplate(this.templateName, subscriptionId, 
this._taskParameters.buildTimeoutInMinutes); var templateID = await this._aibClient.getTemplateId(this.templateName, subscriptionId); - var imagebuilderRunStatus = "failed"; - core.setOutput('templateName', this.templateName); - core.setOutput('templateId', templateID); - core.setOutput('run-output-name', runOutputName); - if (out) { + + if (this._taskParameters.actionRunMode !== "nowait"){ + var out = await this._aibClient.getRunOutput(this.templateName, runOutputName, subscriptionId); + var imagebuilderRunStatus = "failed"; + core.setOutput('templateName', this.templateName); + core.setOutput('templateId', templateID); + core.setOutput('run-output-name', runOutputName); + if (out) { + core.setOutput('custom-image-uri', out); + core.setOutput('imagebuilder-run-status', "succeeded"); + imagebuilderRunStatus = "succeeded"; + } + } + else{ + out = "" core.setOutput('custom-image-uri', out); - core.setOutput('imagebuilder-run-status', "succeeded"); - imagebuilderRunStatus = "succeeded"; + core.setOutput('imagebuilder-run-status', "skipped"); + imagebuilderRunStatus = "skipped"; } + if (Utils.IsEqual(templateJson.properties.source.type, "PlatformImage")) { core.setOutput('pirPublisher', templateJson.properties.source.publisher); @@ -152,6 +165,10 @@ export default class ImageBuilder { finally { var outStream = await this.executeAzCliCommand(`group exists -n ${this._taskParameters.resourceGroupName}`); if (outStream) { + if (imagebuilderRunStatus != "failed" && (this._taskParameters.actionRunMode == "nowait" || this._taskParameters.actionRunMode == "distro")){ + console.log("skipping cleanup action run mode set to nowait or distro") + return + } this.cleanup(subscriptionId); } } diff --git a/src/constants.ts b/src/constants.ts index f72adc0c..17d2646b 100644 --- a/src/constants.ts +++ b/src/constants.ts @@ -1,3 +1,4 @@ +export var ActionRunMode = "action-run-mode"; export var Location = "location"; export var ResourceGroupName = "resource-group-name"; export var 
ImageBuilderTemplateName = "image-builder-template-name"; From 4a0f06f193b4d81e1a644e5e943ff69a9f1b8ddd Mon Sep 17 00:00:00 2001 From: Ron Truex Date: Mon, 4 Apr 2022 14:11:08 -0700 Subject: [PATCH 03/69] added to delete storage once image starts distributing --- README.md | 4 ++-- action.yml | 2 +- lib/AzureImageBuilderClient.js | 2 +- lib/ImageBuilder.js | 6 +++--- src/AzureImageBuilderClient.ts | 2 +- src/ImageBuilder.ts | 6 +++--- src/TaskParameters.ts | 9 +++++++++ 7 files changed, 20 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 9db4f62d..444aedc7 100644 --- a/README.md +++ b/README.md @@ -45,8 +45,8 @@ Learn more about configuring permissions for Azure Image builder Service using [ * Full * default * Wait until everything completes - * Distro - * Wait until the template starts the distro process + * BuildOnly + * Waits until the template starts the distribute process * NoWait * This will not wait for actions to complete, like run template * It will try to get the action to complete the quicket but can require the most manual cleanup diff --git a/action.yml b/action.yml index 100e2497..8e092ab3 100644 --- a/action.yml +++ b/action.yml @@ -3,7 +3,7 @@ description: "Create custom virtual machine images that contain artifacts built inputs: #action inputs action-run-mode: - description: 'The run mode for the action. Full (default) wait until everything completes, Distro wait until the template starts the distro process, NoWait as soon as the action sets up and triggers the action it will quit' + description: 'The run mode for the action. 
Full (default) wait until everything completes, BuildOnly waits until the template starts the didistribute stro process, NoWait as soon as the action sets up and triggers the action it will quit' required: true default: full diff --git a/lib/AzureImageBuilderClient.js b/lib/AzureImageBuilderClient.js index fc8297f0..33304f87 100644 --- a/lib/AzureImageBuilderClient.js +++ b/lib/AzureImageBuilderClient.js @@ -214,7 +214,7 @@ class ImageBuilderClient { if (!waitIndefinitely && timeout < new Date().getTime()) { throw Error(`error in url`); } - if (this._taskParameters.actionRunMode == "distro" && templateName && subscriptionId){ + if (this._taskParameters.actionRunMode == "buildonly" && templateName && subscriptionId){ try{ let runTemplate_result = null try{ diff --git a/lib/ImageBuilder.js b/lib/ImageBuilder.js index 13267304..c740a495 100644 --- a/lib/ImageBuilder.js +++ b/lib/ImageBuilder.js @@ -175,8 +175,8 @@ class ImageBuilder { finally { var outStream = yield this.executeAzCliCommand(`group exists -n ${this._taskParameters.resourceGroupName}`); if (outStream) { - if (imagebuilderRunStatus != "failed" && (this._taskParameters.actionRunMode == "nowait" || this._taskParameters.actionRunMode == "distro")){ - console.log("skipping cleanup action run mode set to nowait or distro") + if (imagebuilderRunStatus != "failed" && (this._taskParameters.actionRunMode == "nowait")){ + console.log("skipping cleanup action run mode set to nowait") return } this.cleanup(subscriptionId); @@ -384,7 +384,7 @@ class ImageBuilder { cleanup(subscriptionId) { return __awaiter(this, void 0, void 0, function* () { try { - if (!this.isVhdDistribute && this.imgBuilderTemplateExists) { + if (!this.isVhdDistribute && this.imgBuilderTemplateExists && this._taskParameters.actionRunMode == "full") { yield this._aibClient.deleteTemplate(this.templateName, subscriptionId); console.log(`${this.templateName} got deleted`); } diff --git a/src/AzureImageBuilderClient.ts 
b/src/AzureImageBuilderClient.ts index 5702394e..c9701df2 100644 --- a/src/AzureImageBuilderClient.ts +++ b/src/AzureImageBuilderClient.ts @@ -188,7 +188,7 @@ export default class ImageBuilderClient { if (!waitIndefinitely && timeout < new Date().getTime()) { throw Error(`error in url`); } - if (this._taskParameters.actionRunMode == "distro" && templateName && subscriptionId){ + if (this._taskParameters.actionRunMode == "buildonly" && templateName && subscriptionId){ try{ let runTemplate_result = null try{ diff --git a/src/ImageBuilder.ts b/src/ImageBuilder.ts index ddddb392..01b2760c 100644 --- a/src/ImageBuilder.ts +++ b/src/ImageBuilder.ts @@ -165,8 +165,8 @@ export default class ImageBuilder { finally { var outStream = await this.executeAzCliCommand(`group exists -n ${this._taskParameters.resourceGroupName}`); if (outStream) { - if (imagebuilderRunStatus != "failed" && (this._taskParameters.actionRunMode == "nowait" || this._taskParameters.actionRunMode == "distro")){ - console.log("skipping cleanup action run mode set to nowait or distro") + if (imagebuilderRunStatus != "failed" && (this._taskParameters.actionRunMode == "nowait" || this._taskParameters.actionRunMode == "buildonly")){ + console.log("skipping cleanup action run mode set to nowait or buildonly") return } this.cleanup(subscriptionId); @@ -390,7 +390,7 @@ export default class ImageBuilder { private async cleanup(subscriptionId: string) { try { - if (!this.isVhdDistribute && this.imgBuilderTemplateExists) { + if (!this.isVhdDistribute && this.imgBuilderTemplateExists && this._taskParameters.actionRunMode == "full")) { await this._aibClient.deleteTemplate(this.templateName, subscriptionId); console.log(`${this.templateName} got deleted`); } diff --git a/src/TaskParameters.ts b/src/TaskParameters.ts index 92b698d8..7bc46a2d 100644 --- a/src/TaskParameters.ts +++ b/src/TaskParameters.ts @@ -7,6 +7,7 @@ var fs = require('fs'); export default class TaskParameters { // image builder inputs + public 
actionRunMode: string = ""; public resourceGroupName: string = ""; public location: string = ""; public imagebuilderTemplateName: string; @@ -51,6 +52,13 @@ export default class TaskParameters { console.log("start reading task parameters..."); + this.actionRunMode = tl.getInput(constants.ActionRunMode, { required: true }).toLowerCase(); + if (!this.actionRunMode){ + this.actionRunMode = "full" + } + + console.log(`Action run mode set: ${this.actionRunMode}`) + this.imagebuilderTemplateName = tl.getInput(constants.ImageBuilderTemplateName); if (this.imagebuilderTemplateName.indexOf(".json") > -1) { this.isTemplateJsonProvided = true; @@ -59,6 +67,7 @@ export default class TaskParameters { } this.resourceGroupName = tl.getInput(constants.ResourceGroupName, { required: true }); + this.buildTimeoutInMinutes = parseInt(tl.getInput(constants.BuildTimeoutInMinutes)); this.sourceOSType = tl.getInput(constants.SourceOSType, { required: true }); if (Utils.IsEqual(this.sourceOSType, "windows")) { From bc9168444a67b284dfdb79e2f9a231ba96d44c68 Mon Sep 17 00:00:00 2001 From: Ron Truex Date: Mon, 4 Apr 2022 18:29:47 -0700 Subject: [PATCH 04/69] adding build process --- .github/workflows/build_version.yaml | 63 ++++++++++++++++++++++++++++ dist/index.js | 0 2 files changed, 63 insertions(+) create mode 100644 .github/workflows/build_version.yaml create mode 100644 dist/index.js diff --git a/.github/workflows/build_version.yaml b/.github/workflows/build_version.yaml new file mode 100644 index 00000000..e9c6cc7b --- /dev/null +++ b/.github/workflows/build_version.yaml @@ -0,0 +1,63 @@ +name: Build and Update Version Tag + +on: + push: + branches: + - 'main' + +jobs: + tag_new_version: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + + - uses: actions/setup-node@v3 + with: + node-version: '12' + cache: 'npm' + + - name: 'Install required packages' + shell: bash + run: | + cd ${{ GITHUB.WORKSPACE }}/ + npm install + npm i -g @vercel/ncc + + ncc build 
index.js --license LICENSE + + - name: Check for modified files + id: git-check + run: echo ::set-output name=modified::$(if [ -n "$(git status --porcelain)" ]; then echo "true"; else echo "false"; fi) + + - name: Commit formatted changes + if: steps.git-check.outputs.modified == 'true' + run: | + git config --global user.name 'Azure Image Builder Bot' + git config --global user.email 'azure-image-builder-bot@users.noreply.github.com' + git commit -am "Auto Build GH Action" + git push + + - name: Install GitVersion + uses: gittools/actions/gitversion/setup@v0.9.7 + with: + versionSpec: '5.x' + + - name: Determine Version + id: gitversion + uses: gittools/actions/gitversion/execute@v0.9.7 + + - name: Display SemVer + run: | + echo "SemVer: ${{ steps.gitversion.outputs.semVer }}" + + - name: Create Git tag for PR + uses: actions/github-script@v5 + with: + script: | + github.rest.git.createRef({ + owner: context.repo.owner, + repo: context.repo.repo, + ref: "refs/tags/v${{ steps.gitversion.outputs.semVer }}", + sha: context.sha + }) diff --git a/dist/index.js b/dist/index.js new file mode 100644 index 00000000..e69de29b From 423034a26302c69d197ed5b8b0a3b7acd29aee44 Mon Sep 17 00:00:00 2001 From: Ron Truex Date: Mon, 4 Apr 2022 18:49:54 -0700 Subject: [PATCH 05/69] small update to action --- .github/workflows/build_version.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_version.yaml b/.github/workflows/build_version.yaml index e9c6cc7b..2ef27b00 100644 --- a/.github/workflows/build_version.yaml +++ b/.github/workflows/build_version.yaml @@ -14,7 +14,7 @@ jobs: - uses: actions/setup-node@v3 with: - node-version: '12' + node-version: '14' cache: 'npm' - name: 'Install required packages' From faa3b729ba58966004b8e511fd09e4c2725cbea4 Mon Sep 17 00:00:00 2001 From: Ron Truex Date: Mon, 4 Apr 2022 18:53:02 -0700 Subject: [PATCH 06/69] small update to action --- .github/workflows/build_version.yaml | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_version.yaml b/.github/workflows/build_version.yaml index 2ef27b00..e2992077 100644 --- a/.github/workflows/build_version.yaml +++ b/.github/workflows/build_version.yaml @@ -14,7 +14,7 @@ jobs: - uses: actions/setup-node@v3 with: - node-version: '14' + node-version: '16' cache: 'npm' - name: 'Install required packages' From ba8204f4e8a8cdd119c128a6688e6293163c0fe9 Mon Sep 17 00:00:00 2001 From: Ron Truex Date: Mon, 4 Apr 2022 19:53:36 -0700 Subject: [PATCH 07/69] package updates --- package-lock.json | 459 ++++++++++++++++++++++++---------------------- package.json | 5 +- 2 files changed, 246 insertions(+), 218 deletions(-) diff --git a/package-lock.json b/package-lock.json index 32453cc0..6ef7b0c4 100644 --- a/package-lock.json +++ b/package-lock.json @@ -19,16 +19,18 @@ "azure-storage": "^2.10.3", "eslint": "^5.16.0", "jszip": "^3.5.0", - "tar.gz": "^1.0.7", + "tar": "^6.1.11", "typescript": "^3.9.7", "zip-lib": "^0.7.1" - }, - "devDependencies": {} + } }, "node_modules/@actions/core": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.2.4.tgz", - "integrity": "sha512-YJCEq8BE3CdN8+7HPZ/4DxJjk/OkZV2FFIf+DlZTC/4iBlzYCD5yjRR6eiOS5llO11zbRltIRuKAjMKaWTE6cg==" + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.6.0.tgz", + "integrity": "sha512-NB1UAZomZlCV/LmJqkLhNTqtKfFXJZAUPcfl/zqG7EfsQdeUJtaWO98SGbuQ3pydJ3fHl2CvI/51OKYlCYYcaw==", + "dependencies": { + "@actions/http-client": "^1.0.11" + } }, "node_modules/@actions/exec": { "version": "1.0.4", @@ -38,6 +40,14 @@ "@actions/io": "^1.0.1" } }, + "node_modules/@actions/http-client": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@actions/http-client/-/http-client-1.0.11.tgz", + "integrity": "sha512-VRYHGQV1rqnROJqdMvGUbY/Kn8vriQe/F9HR2AlYHzmKuM/p3kjNuXhmdBfcVgsvRWTz5C5XW5xvndZrVBuAYg==", + "dependencies": { + "tunnel": "0.0.6" + } + }, "node_modules/@actions/io": 
{ "version": "1.0.2", "resolved": "https://registry.npmjs.org/@actions/io/-/io-1.0.2.tgz", @@ -119,9 +129,9 @@ } }, "node_modules/ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.1.tgz", + "integrity": "sha512-+O9Jct8wf++lXxxFc4hc8LsjaSq0HFzzL7cVsw8pRDIPdjKD2mT4ytDZlLuSBZ4cLKZFXIrMGO7DbQCtMJJMKw==", "engines": { "node": ">=4" } @@ -277,21 +287,22 @@ } }, "node_modules/azure-storage": { - "version": "2.10.3", - "resolved": "https://registry.npmjs.org/azure-storage/-/azure-storage-2.10.3.tgz", - "integrity": "sha512-IGLs5Xj6kO8Ii90KerQrrwuJKexLgSwYC4oLWmc11mzKe7Jt2E5IVg+ZQ8K53YWZACtVTMBNO3iGuA+4ipjJxQ==", + "version": "2.10.7", + "resolved": "https://registry.npmjs.org/azure-storage/-/azure-storage-2.10.7.tgz", + "integrity": "sha512-4oeFGtn3Ziw/fGs/zkoIpKKtygnCVIcZwzJ7UQzKTxhkGQqVCByOFbYqMGYR3L+wOsunX9lNfD0jc51SQuKSSA==", "deprecated": "Please note: newer packages @azure/storage-blob, @azure/storage-queue and @azure/storage-file are available as of November 2019 and @azure/data-tables is available as of June 2021. While the legacy azure-storage package will continue to receive critical bug fixes, we strongly encourage you to upgrade. 
Migration guide can be found: https://github.com/Azure/azure-sdk-for-js/blob/main/sdk/storage/MigrationGuide.md", "dependencies": { - "browserify-mime": "~1.2.9", + "browserify-mime": "^1.2.9", "extend": "^3.0.2", - "json-edm-parser": "0.1.2", - "md5.js": "1.3.4", - "readable-stream": "~2.0.0", + "json-edm-parser": "~0.1.2", + "json-schema": "~0.4.0", + "md5.js": "^1.3.4", + "readable-stream": "^2.0.0", "request": "^2.86.0", - "underscore": "~1.8.3", + "underscore": "^1.12.1", "uuid": "^3.0.0", - "validator": "~9.4.1", - "xml2js": "0.2.8", + "validator": "^13.7.0", + "xml2js": "~0.2.8", "xmlbuilder": "^9.0.7" }, "engines": { @@ -360,22 +371,6 @@ "node": ">= 6" } }, - "node_modules/block-stream": { - "version": "0.0.9", - "resolved": "https://registry.npmjs.org/block-stream/-/block-stream-0.0.9.tgz", - "integrity": "sha1-E+v+d4oDIFz+A3UUgeu0szAMEmo=", - "dependencies": { - "inherits": "~2.0.0" - }, - "engines": { - "node": "0.4 || >=0.5.8" - } - }, - "node_modules/bluebird": { - "version": "2.11.0", - "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-2.11.0.tgz", - "integrity": "sha1-U0uQM8AiyVecVro7Plpcqvu2UOE=" - }, "node_modules/brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", @@ -438,6 +433,14 @@ "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==" }, + "node_modules/chownr": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", + "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", + "engines": { + "node": ">=10" + } + }, "node_modules/cli-cursor": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-2.1.0.tgz", @@ -478,11 +481,6 @@ "node": ">= 0.8" } }, - "node_modules/commander": { - "version": "2.20.3", - 
"resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" - }, "node_modules/compress-commons": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/compress-commons/-/compress-commons-4.0.1.tgz", @@ -973,25 +971,22 @@ "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" - }, - "node_modules/fstream": { - "version": "1.0.12", - "resolved": "https://registry.npmjs.org/fstream/-/fstream-1.0.12.tgz", - "integrity": "sha512-WvJ193OHa0GHPEL+AycEJgxvBEwyfRkN1vhjca23OaPVMCaLCXTd5qAu82AjTcgP1UJmytkOKb63Ypde7raDIg==", + "node_modules/fs-minipass": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", "dependencies": { - "graceful-fs": "^4.1.2", - "inherits": "~2.0.0", - "mkdirp": ">=0.5 0", - "rimraf": "2" + "minipass": "^3.0.0" }, "engines": { - "node": ">=0.6" + "node": ">= 8" } }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" + }, "node_modules/function-bind": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", @@ -1220,9 +1215,9 @@ } }, "node_modules/inquirer/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": 
"sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.1.tgz", + "integrity": "sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g==", "engines": { "node": ">=6" } @@ -1392,9 +1387,9 @@ } }, "node_modules/json-schema": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", - "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", + "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" }, "node_modules/json-schema-traverse": { "version": "0.4.1", @@ -1420,23 +1415,23 @@ ] }, "node_modules/jsprim": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", - "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", - "engines": [ - "node >=0.6.0" - ], + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.2.tgz", + "integrity": "sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw==", "dependencies": { "assert-plus": "1.0.0", "extsprintf": "1.3.0", - "json-schema": "0.2.3", + "json-schema": "0.4.0", "verror": "1.10.0" + }, + "engines": { + "node": ">=0.6.0" } }, "node_modules/jszip": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/jszip/-/jszip-3.5.0.tgz", - "integrity": "sha512-WRtu7TPCmYePR1nazfrtuF216cIVon/3GWOvHS9QR5bIwSbnxtdpma6un3jyGGNhHsKCSzn5Ypk+EkDRvTGiFA==", + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/jszip/-/jszip-3.9.0.tgz", + "integrity": "sha512-Vb3SMfASUN1EKrFzv5A5+lTaZnzLzT5E6A9zyT7WFqMSfhT2Z7iS5FgSOjx2Olm3MDj8OqKj6GHyP2kMt1Ir6w==", "dependencies": { "lie": "~3.3.0", "pako": "~1.0.2", @@ -1508,9 +1503,9 @@ } }, "node_modules/lodash": { - 
"version": "4.17.19", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.19.tgz", - "integrity": "sha512-JNvd8XER9GQX0v2qJgsaN/mzFCNA5BRe/j8JN9d+tWyGLSodKQHKFicdwNYzWwI3wjRnaKPsGj1XkBjx/F96DQ==" + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" }, "node_modules/lodash.defaults": { "version": "4.2.0", @@ -1585,9 +1580,32 @@ } }, "node_modules/minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", + "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==" + }, + "node_modules/minipass": { + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.6.tgz", + "integrity": "sha512-rty5kpw9/z8SX9dmxblFA6edItUmwJgMeYDZRrwlIVN27i8gysGbznJwUggw2V/FVqFSDdWy040ZPS811DYAqQ==", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minizlib": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", + "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", + "dependencies": { + "minipass": "^3.0.0", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">= 8" + } }, "node_modules/mkdirp": { "version": "0.5.5", @@ -1600,11 +1618,6 @@ "mkdirp": "bin/cmd.js" } }, - "node_modules/mout": { - "version": "0.11.1", - "resolved": "https://registry.npmjs.org/mout/-/mout-0.11.1.tgz", - "integrity": "sha1-ujYR318OWx/7/QEWa48C0fX6K5k=" - }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", @@ 
-2134,9 +2147,9 @@ } }, "node_modules/table/node_modules/ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.1.tgz", + "integrity": "sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g==", "engines": { "node": ">=6" } @@ -2166,14 +2179,19 @@ } }, "node_modules/tar": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/tar/-/tar-2.2.2.tgz", - "integrity": "sha512-FCEhQ/4rE1zYv9rYXJw/msRqsnmlje5jHP6huWeBZ704jUTy02c5AZyWujpMR1ax6mVw9NyJMfuK2CMDWVIfgA==", - "deprecated": "This version of tar is no longer supported, and will not receive security updates. Please upgrade asap.", - "dependencies": { - "block-stream": "*", - "fstream": "^1.0.12", - "inherits": "2" + "version": "6.1.11", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.11.tgz", + "integrity": "sha512-an/KZQzQUkZCkuoAA64hM92X0Urb6VpRhAFllDzz44U2mcD5scmT3zBc4VgVpkugF580+DQn8eAFSyoQt0tznA==", + "dependencies": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^3.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">= 10" } }, "node_modules/tar-stream": { @@ -2204,20 +2222,15 @@ "node": ">= 6" } }, - "node_modules/tar.gz": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/tar.gz/-/tar.gz-1.0.7.tgz", - "integrity": "sha512-uhGatJvds/3diZrETqMj4RxBR779LKlIE74SsMcn5JProZsfs9j0QBwWO1RW+IWNJxS2x8Zzra1+AW6OQHWphg==", - "deprecated": "⚠️ WARNING ⚠️ tar.gz module has been deprecated and your application is vulnerable. 
Please use tar module instead: https://npmjs.com/tar", - "dependencies": { - "bluebird": "^2.9.34", - "commander": "^2.8.1", - "fstream": "^1.0.8", - "mout": "^0.11.0", - "tar": "^2.1.1" - }, + "node_modules/tar/node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", "bin": { - "targz": "bin/targz" + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" } }, "node_modules/text-table": { @@ -2294,13 +2307,13 @@ } }, "node_modules/typed-rest-client": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/typed-rest-client/-/typed-rest-client-1.7.3.tgz", - "integrity": "sha512-CwTpx/TkRHGZoHkJhBcp4X8K3/WtlzSHVQR0OIFnt10j4tgy4ypgq/SrrgVpA1s6tAL49Q6J3R5C0Cgfh2ddqA==", + "version": "1.8.6", + "resolved": "https://registry.npmjs.org/typed-rest-client/-/typed-rest-client-1.8.6.tgz", + "integrity": "sha512-xcQpTEAJw2DP7GqVNECh4dD+riS+C1qndXLfBCJ3xk0kqprtGN491P5KlmrDbKdtuW8NEcP/5ChxiJI3S9WYTA==", "dependencies": { "qs": "^6.9.1", "tunnel": "0.0.6", - "underscore": "1.8.3" + "underscore": "^1.12.1" } }, "node_modules/typescript": { @@ -2316,9 +2329,9 @@ } }, "node_modules/underscore": { - "version": "1.8.3", - "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.8.3.tgz", - "integrity": "sha1-Tz+1OxBuYJf8+ctBCfKl6b36UCI=" + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.13.2.tgz", + "integrity": "sha512-ekY1NhRzq0B08g4bGuX4wd2jZx5GnKz6mKSqFL4nqBlfyMGiG10gDFhDTMEfYmDL6Jy0FUIZp7wiRB+0BP7J2g==" }, "node_modules/uri-js": { "version": "4.2.2", @@ -2356,9 +2369,9 @@ } }, "node_modules/validator": { - "version": "9.4.1", - "resolved": "https://registry.npmjs.org/validator/-/validator-9.4.1.tgz", - "integrity": "sha512-YV5KjzvRmSyJ1ee/Dm5UED0G+1L4GZnLN3w6/T+zZm8scVua4sOhYKWTUrKa0H/tMiJyO9QLHMPN+9mB/aMunA==", + "version": "13.7.0", + 
"resolved": "https://registry.npmjs.org/validator/-/validator-13.7.0.tgz", + "integrity": "sha512-nYXQLCBkpJ8X6ltALua9dRrZDHVYxjJ1wgskNt1lH9fzGjs3tgojGSCBjmEPwkWS1y29+DrizMTW19Pr9uB2nw==", "engines": { "node": ">= 0.10" } @@ -2430,6 +2443,11 @@ "node": ">=4" } }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, "node_modules/yauzl": { "version": "2.10.0", "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", @@ -2488,9 +2506,12 @@ }, "dependencies": { "@actions/core": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.2.4.tgz", - "integrity": "sha512-YJCEq8BE3CdN8+7HPZ/4DxJjk/OkZV2FFIf+DlZTC/4iBlzYCD5yjRR6eiOS5llO11zbRltIRuKAjMKaWTE6cg==" + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/@actions/core/-/core-1.6.0.tgz", + "integrity": "sha512-NB1UAZomZlCV/LmJqkLhNTqtKfFXJZAUPcfl/zqG7EfsQdeUJtaWO98SGbuQ3pydJ3fHl2CvI/51OKYlCYYcaw==", + "requires": { + "@actions/http-client": "^1.0.11" + } }, "@actions/exec": { "version": "1.0.4", @@ -2500,6 +2521,14 @@ "@actions/io": "^1.0.1" } }, + "@actions/http-client": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@actions/http-client/-/http-client-1.0.11.tgz", + "integrity": "sha512-VRYHGQV1rqnROJqdMvGUbY/Kn8vriQe/F9HR2AlYHzmKuM/p3kjNuXhmdBfcVgsvRWTz5C5XW5xvndZrVBuAYg==", + "requires": { + "tunnel": "0.0.6" + } + }, "@actions/io": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/@actions/io/-/io-1.0.2.tgz", @@ -2566,9 +2595,9 @@ "integrity": "sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ==" }, "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=" + "version": "3.0.1", + "resolved": 
"https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.1.tgz", + "integrity": "sha512-+O9Jct8wf++lXxxFc4hc8LsjaSq0HFzzL7cVsw8pRDIPdjKD2mT4ytDZlLuSBZ4cLKZFXIrMGO7DbQCtMJJMKw==" }, "ansi-styles": { "version": "3.2.1", @@ -2696,20 +2725,21 @@ } }, "azure-storage": { - "version": "2.10.3", - "resolved": "https://registry.npmjs.org/azure-storage/-/azure-storage-2.10.3.tgz", - "integrity": "sha512-IGLs5Xj6kO8Ii90KerQrrwuJKexLgSwYC4oLWmc11mzKe7Jt2E5IVg+ZQ8K53YWZACtVTMBNO3iGuA+4ipjJxQ==", + "version": "2.10.7", + "resolved": "https://registry.npmjs.org/azure-storage/-/azure-storage-2.10.7.tgz", + "integrity": "sha512-4oeFGtn3Ziw/fGs/zkoIpKKtygnCVIcZwzJ7UQzKTxhkGQqVCByOFbYqMGYR3L+wOsunX9lNfD0jc51SQuKSSA==", "requires": { - "browserify-mime": "~1.2.9", + "browserify-mime": "^1.2.9", "extend": "^3.0.2", - "json-edm-parser": "0.1.2", - "md5.js": "1.3.4", - "readable-stream": "~2.0.0", + "json-edm-parser": "~0.1.2", + "json-schema": "~0.4.0", + "md5.js": "^1.3.4", + "readable-stream": "^2.0.0", "request": "^2.86.0", - "underscore": "~1.8.3", + "underscore": "^1.12.1", "uuid": "^3.0.0", - "validator": "~9.4.1", - "xml2js": "0.2.8", + "validator": "^13.7.0", + "xml2js": "~0.2.8", "xmlbuilder": "^9.0.7" }, "dependencies": { @@ -2773,19 +2803,6 @@ } } }, - "block-stream": { - "version": "0.0.9", - "resolved": "https://registry.npmjs.org/block-stream/-/block-stream-0.0.9.tgz", - "integrity": "sha1-E+v+d4oDIFz+A3UUgeu0szAMEmo=", - "requires": { - "inherits": "~2.0.0" - } - }, - "bluebird": { - "version": "2.11.0", - "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-2.11.0.tgz", - "integrity": "sha1-U0uQM8AiyVecVro7Plpcqvu2UOE=" - }, "brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", @@ -2839,6 +2856,11 @@ "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==" }, + 
"chownr": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", + "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==" + }, "cli-cursor": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-2.1.0.tgz", @@ -2873,11 +2895,6 @@ "delayed-stream": "~1.0.0" } }, - "commander": { - "version": "2.20.3", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", - "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" - }, "compress-commons": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/compress-commons/-/compress-commons-4.0.1.tgz", @@ -3275,22 +3292,19 @@ "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==" }, + "fs-minipass": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "requires": { + "minipass": "^3.0.0" + } + }, "fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" }, - "fstream": { - "version": "1.0.12", - "resolved": "https://registry.npmjs.org/fstream/-/fstream-1.0.12.tgz", - "integrity": "sha512-WvJ193OHa0GHPEL+AycEJgxvBEwyfRkN1vhjca23OaPVMCaLCXTd5qAu82AjTcgP1UJmytkOKb63Ypde7raDIg==", - "requires": { - "graceful-fs": "^4.1.2", - "inherits": "~2.0.0", - "mkdirp": ">=0.5 0", - "rimraf": "2" - } - }, "function-bind": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", @@ -3468,9 +3482,9 @@ }, "dependencies": { "ansi-regex": { - "version": "4.1.0", - "resolved": 
"https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.1.tgz", + "integrity": "sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g==" }, "strip-ansi": { "version": "5.2.0", @@ -3589,9 +3603,9 @@ } }, "json-schema": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", - "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", + "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" }, "json-schema-traverse": { "version": "0.4.1", @@ -3614,20 +3628,20 @@ "integrity": "sha1-XAxWhRBxYOcv50ib3eoLRMK8Z70=" }, "jsprim": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", - "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.2.tgz", + "integrity": "sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw==", "requires": { "assert-plus": "1.0.0", "extsprintf": "1.3.0", - "json-schema": "0.2.3", + "json-schema": "0.4.0", "verror": "1.10.0" } }, "jszip": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/jszip/-/jszip-3.5.0.tgz", - "integrity": "sha512-WRtu7TPCmYePR1nazfrtuF216cIVon/3GWOvHS9QR5bIwSbnxtdpma6un3jyGGNhHsKCSzn5Ypk+EkDRvTGiFA==", + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/jszip/-/jszip-3.9.0.tgz", + "integrity": "sha512-Vb3SMfASUN1EKrFzv5A5+lTaZnzLzT5E6A9zyT7WFqMSfhT2Z7iS5FgSOjx2Olm3MDj8OqKj6GHyP2kMt1Ir6w==", "requires": { "lie": "~3.3.0", "pako": "~1.0.2", @@ -3695,9 +3709,9 @@ } }, "lodash": { - "version": "4.17.19", - 
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.19.tgz", - "integrity": "sha512-JNvd8XER9GQX0v2qJgsaN/mzFCNA5BRe/j8JN9d+tWyGLSodKQHKFicdwNYzWwI3wjRnaKPsGj1XkBjx/F96DQ==" + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" }, "lodash.defaults": { "version": "4.2.0", @@ -3760,9 +3774,26 @@ } }, "minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", + "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==" + }, + "minipass": { + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.6.tgz", + "integrity": "sha512-rty5kpw9/z8SX9dmxblFA6edItUmwJgMeYDZRrwlIVN27i8gysGbznJwUggw2V/FVqFSDdWy040ZPS811DYAqQ==", + "requires": { + "yallist": "^4.0.0" + } + }, + "minizlib": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", + "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", + "requires": { + "minipass": "^3.0.0", + "yallist": "^4.0.0" + } }, "mkdirp": { "version": "0.5.5", @@ -3772,11 +3803,6 @@ "minimist": "^1.2.5" } }, - "mout": { - "version": "0.11.1", - "resolved": "https://registry.npmjs.org/mout/-/mout-0.11.1.tgz", - "integrity": "sha1-ujYR318OWx/7/QEWa48C0fX6K5k=" - }, "ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", @@ -4174,9 +4200,9 @@ }, "dependencies": { "ansi-regex": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz", - "integrity": 
"sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==" + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.1.tgz", + "integrity": "sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g==" }, "string-width": { "version": "3.1.0", @@ -4199,13 +4225,23 @@ } }, "tar": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/tar/-/tar-2.2.2.tgz", - "integrity": "sha512-FCEhQ/4rE1zYv9rYXJw/msRqsnmlje5jHP6huWeBZ704jUTy02c5AZyWujpMR1ax6mVw9NyJMfuK2CMDWVIfgA==", + "version": "6.1.11", + "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.11.tgz", + "integrity": "sha512-an/KZQzQUkZCkuoAA64hM92X0Urb6VpRhAFllDzz44U2mcD5scmT3zBc4VgVpkugF580+DQn8eAFSyoQt0tznA==", "requires": { - "block-stream": "*", - "fstream": "^1.0.12", - "inherits": "2" + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^3.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "dependencies": { + "mkdirp": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==" + } } }, "tar-stream": { @@ -4232,18 +4268,6 @@ } } }, - "tar.gz": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/tar.gz/-/tar.gz-1.0.7.tgz", - "integrity": "sha512-uhGatJvds/3diZrETqMj4RxBR779LKlIE74SsMcn5JProZsfs9j0QBwWO1RW+IWNJxS2x8Zzra1+AW6OQHWphg==", - "requires": { - "bluebird": "^2.9.34", - "commander": "^2.8.1", - "fstream": "^1.0.8", - "mout": "^0.11.0", - "tar": "^2.1.1" - } - }, "text-table": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", @@ -4303,13 +4327,13 @@ } }, "typed-rest-client": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/typed-rest-client/-/typed-rest-client-1.7.3.tgz", - "integrity": 
"sha512-CwTpx/TkRHGZoHkJhBcp4X8K3/WtlzSHVQR0OIFnt10j4tgy4ypgq/SrrgVpA1s6tAL49Q6J3R5C0Cgfh2ddqA==", + "version": "1.8.6", + "resolved": "https://registry.npmjs.org/typed-rest-client/-/typed-rest-client-1.8.6.tgz", + "integrity": "sha512-xcQpTEAJw2DP7GqVNECh4dD+riS+C1qndXLfBCJ3xk0kqprtGN491P5KlmrDbKdtuW8NEcP/5ChxiJI3S9WYTA==", "requires": { "qs": "^6.9.1", "tunnel": "0.0.6", - "underscore": "1.8.3" + "underscore": "^1.12.1" } }, "typescript": { @@ -4318,9 +4342,9 @@ "integrity": "sha512-BLbiRkiBzAwsjut4x/dsibSTB6yWpwT5qWmC2OfuCg3GgVQCSgMs4vEctYPhsaGtd0AeuuHMkjZ2h2WG8MSzRw==" }, "underscore": { - "version": "1.8.3", - "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.8.3.tgz", - "integrity": "sha1-Tz+1OxBuYJf8+ctBCfKl6b36UCI=" + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.13.2.tgz", + "integrity": "sha512-ekY1NhRzq0B08g4bGuX4wd2jZx5GnKz6mKSqFL4nqBlfyMGiG10gDFhDTMEfYmDL6Jy0FUIZp7wiRB+0BP7J2g==" }, "uri-js": { "version": "4.2.2", @@ -4354,9 +4378,9 @@ "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==" }, "validator": { - "version": "9.4.1", - "resolved": "https://registry.npmjs.org/validator/-/validator-9.4.1.tgz", - "integrity": "sha512-YV5KjzvRmSyJ1ee/Dm5UED0G+1L4GZnLN3w6/T+zZm8scVua4sOhYKWTUrKa0H/tMiJyO9QLHMPN+9mB/aMunA==" + "version": "13.7.0", + "resolved": "https://registry.npmjs.org/validator/-/validator-13.7.0.tgz", + "integrity": "sha512-nYXQLCBkpJ8X6ltALua9dRrZDHVYxjJ1wgskNt1lH9fzGjs3tgojGSCBjmEPwkWS1y29+DrizMTW19Pr9uB2nw==" }, "verror": { "version": "1.10.0", @@ -4407,6 +4431,11 @@ "mkdirp": "^0.5.1" } }, + "yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, "yauzl": { "version": "2.10.0", "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", diff --git 
a/package.json b/package.json index b67f1306..9ed7969d 100644 --- a/package.json +++ b/package.json @@ -29,9 +29,8 @@ "azure-storage": "^2.10.3", "eslint": "^5.16.0", "jszip": "^3.5.0", - "tar.gz": "^1.0.7", + "tar": "^6.1.11", "typescript": "^3.9.7", "zip-lib": "^0.7.1" - }, - "devDependencies": {} + } } From 646aeea9ed04b611e55aebc29895ee5b88f652c3 Mon Sep 17 00:00:00 2001 From: Ron Truex Date: Mon, 4 Apr 2022 20:19:39 -0700 Subject: [PATCH 08/69] build updates --- .github/workflows/build_version.yaml | 2 +- action.yml | 2 +- dist/LICENSE | 3092 + dist/index.js | 122788 ++++++++++++++++++++++++ 4 files changed, 125882 insertions(+), 2 deletions(-) create mode 100644 dist/LICENSE diff --git a/.github/workflows/build_version.yaml b/.github/workflows/build_version.yaml index e2992077..cfe30b7d 100644 --- a/.github/workflows/build_version.yaml +++ b/.github/workflows/build_version.yaml @@ -24,7 +24,7 @@ jobs: npm install npm i -g @vercel/ncc - ncc build index.js --license LICENSE + ncc build lib/index.js --license LICENSE - name: Check for modified files id: git-check diff --git a/action.yml b/action.yml index 8e092ab3..98568f15 100644 --- a/action.yml +++ b/action.yml @@ -66,4 +66,4 @@ outputs: runs: using: 'node12' - main: 'lib/index.js' + main: 'dist/index.js' diff --git a/dist/LICENSE b/dist/LICENSE new file mode 100644 index 00000000..53752c06 --- /dev/null +++ b/dist/LICENSE @@ -0,0 +1,3092 @@ +@actions/core +MIT +The MIT License (MIT) + +Copyright 2019 GitHub + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included 
in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +@actions/exec +MIT + +@actions/http-client +MIT +Actions Http Client for Node.js + +Copyright (c) GitHub, Inc. + +All rights reserved. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT +LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +@actions/io +MIT + +ajv +MIT +The MIT License (MIT) + +Copyright (c) 2015-2017 Evgeny Poberezkin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + +archiver +MIT +Copyright (c) 2012-2014 Chris Talkington, contributors. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +archiver-utils +MIT +Copyright (c) 2015 Chris Talkington. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +asn1 +MIT +Copyright (c) 2011 Mark Cavage, All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE + + +assert-plus +MIT + +async +MIT +Copyright (c) 2010-2018 Caolan McMahon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +asynckit +MIT +The MIT License (MIT) + +Copyright (c) 2016 Alex Indigo + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +aws-sign2 +Apache-2.0 +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of this License; and + +You must cause any modified files to carry prominent notices stating that You changed the files; and + +You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +aws4 +MIT +Copyright 2013 Michael Hart (michael.hart.au@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +azure-actions-webclient +MIT + +azure-storage +Apache-2.0 + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + +azure-storage-md5-wrapper +Apache-2.0 + +azure-storage-request-wrapper +Apache-2.0 + +balanced-match +MIT +(MIT) + +Copyright (c) 2013 Julian Gruber <julian@juliangruber.com> + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +bcrypt-pbkdf +BSD-3-Clause +The Blowfish portions are under the following license: + +Blowfish block cipher for OpenBSD +Copyright 1997 Niels Provos +All rights reserved. + +Implementation advice by David Mazieres . + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. 
The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +The bcrypt_pbkdf portions are under the following license: + +Copyright (c) 2013 Ted Unangst + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + + +Performance improvements (Javascript-specific): + +Copyright 2016, Joyent Inc +Author: Alex Wilson + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +bl +MIT +The MIT License (MIT) +===================== + +Copyright (c) 2013-2019 bl contributors +---------------------------------- + +*bl contributors listed at * + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +brace-expansion +MIT +MIT License + +Copyright (c) 2013 Julian Gruber + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +browserify-mime +Copyright (c) 2010 Benjamin Thomas, Robert Kieffer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +buffer-crc32 +MIT +The MIT License + +Copyright (c) 2013 Brian J. Brennan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the +Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE +FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +caseless +Apache-2.0 +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +1. Definitions. +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: +You must give any other recipients of the Work or Derivative Works a copy of this License; and +You must cause any modified files to carry prominent notices stating that You changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. 
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. +END OF TERMS AND CONDITIONS + +combined-stream +MIT +Copyright (c) 2011 Debuggable Limited + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +compress-commons +MIT +Copyright (c) 2014 Chris Talkington, contributors. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
+ +concat-map +MIT +This software is released under the MIT license: + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +core-util-is +MIT +Copyright Node.js contributors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. + + +crc +MIT +The MIT License (MIT) + +Copyright 2014 Alex Gorbatchev + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +crc32-stream +MIT +Copyright (c) 2014 Chris Talkington, contributors. 
+ +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +delayed-stream +MIT +Copyright (c) 2011 Debuggable Limited + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +ecc-jsbn +MIT +The MIT License (MIT) + +Copyright (c) 2014 Jeremie Miller + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +end-of-stream +MIT +The MIT License (MIT) + +Copyright (c) 2014 Mathias Buus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +extend +MIT +The MIT License (MIT) + +Copyright (c) 2014 Stefan Thomas + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + + +extsprintf +MIT +Copyright (c) 2012, Joyent, Inc. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE + + +fast-deep-equal +MIT +MIT License + +Copyright (c) 2017 Evgeny Poberezkin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +fast-json-stable-stringify +MIT +This software is released under the MIT license: + +Copyright (c) 2017 Evgeny Poberezkin +Copyright (c) 2013 James Halliday + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +forever-agent +Apache-2.0 +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of this License; and + +You must cause any modified files to carry prominent notices stating that You changed the files; and + +You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +form-data +MIT +Copyright (c) 2012 Felix Geisendörfer (felix@debuggable.com) and contributors + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + +fs-constants +MIT +The MIT License (MIT) + +Copyright (c) 2018 Mathias Buus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +fs.realpath +ISC +The ISC License + +Copyright (c) Isaac Z. Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +---- + +This library bundles a version of the `fs.realpath` and `fs.realpathSync` +methods from Node.js v0.10 under the terms of the Node.js MIT license. + +Node's license follows, also included at the header of `old.js` which contains +the licensed code: + + Copyright Joyent, Inc. and other Node contributors. 
+ + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + + +glob +ISC +The ISC License + +Copyright (c) Isaac Z. Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +## Glob Logo + +Glob's logo created by Tanya Brassie , licensed +under a Creative Commons Attribution-ShareAlike 4.0 International License +https://creativecommons.org/licenses/by-sa/4.0/ + + +graceful-fs +ISC +The ISC License + +Copyright (c) Isaac Z. Schlueter, Ben Noordhuis, and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +har-schema +ISC +Copyright (c) 2015, Ahmad Nassri + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ + +har-validator +MIT +MIT License + +Copyright (c) 2018 Ahmad Nassri + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +http-signature +MIT +Copyright Joyent, Inc. All rights reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. + + +inflight +ISC +The ISC License + +Copyright (c) Isaac Z. Schlueter + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +inherits +ISC +The ISC License + +Copyright (c) Isaac Z. Schlueter + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. 
+ + + +is-typedarray +MIT +This software is released under the MIT license: + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +isarray +MIT + +isstream +MIT +The MIT License (MIT) +===================== + +Copyright (c) 2015 Rod Vagg +--------------------------- + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +jsbn +MIT +Licensing +--------- + +This software is covered under the following copyright: + +/* + * Copyright (c) 2003-2005 Tom Wu + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, + * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY + * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL TOM WU BE LIABLE FOR ANY SPECIAL, INCIDENTAL, + * INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, OR ANY DAMAGES WHATSOEVER + * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER OR NOT ADVISED OF + * THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF LIABILITY, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * In addition, the following condition applies: + * + * All redistributions must retain an intact copy of this copyright notice + * and disclaimer. 
+ */ + +Address all questions regarding this license to: + + Tom Wu + tjw@cs.Stanford.EDU + +json-edm-parser +MIT +The MIT License (MIT) + +Copyright (c) 2016 Yang Xia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + +json-schema +(AFL-2.1 OR BSD-3-Clause) +Dojo is available under *either* the terms of the BSD 3-Clause "New" License *or* the +Academic Free License version 2.1. As a recipient of Dojo, you may choose which +license to receive this code under (except as noted in per-module LICENSE +files). Some modules may not be the copyright of the Dojo Foundation. These +modules contain explicit declarations of copyright in both the LICENSE files in +the directories in which they reside and in the code itself. No external +contributions are allowed under licenses which are fundamentally incompatible +with the AFL-2.1 OR and BSD-3-Clause licenses that Dojo is distributed under. + +The text of the AFL-2.1 and BSD-3-Clause licenses is reproduced below. 
+ +------------------------------------------------------------------------------- +BSD 3-Clause "New" License: +********************** + +Copyright (c) 2005-2015, The Dojo Foundation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the Dojo Foundation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------- +The Academic Free License, v. 
2.1: +********************************** + +This Academic Free License (the "License") applies to any original work of +authorship (the "Original Work") whose owner (the "Licensor") has placed the +following notice immediately following the copyright notice for the Original +Work: + +Licensed under the Academic Free License version 2.1 + +1) Grant of Copyright License. Licensor hereby grants You a world-wide, +royalty-free, non-exclusive, perpetual, sublicenseable license to do the +following: + +a) to reproduce the Original Work in copies; + +b) to prepare derivative works ("Derivative Works") based upon the Original +Work; + +c) to distribute copies of the Original Work and Derivative Works to the +public; + +d) to perform the Original Work publicly; and + +e) to display the Original Work publicly. + +2) Grant of Patent License. Licensor hereby grants You a world-wide, +royalty-free, non-exclusive, perpetual, sublicenseable license, under patent +claims owned or controlled by the Licensor that are embodied in the Original +Work as furnished by the Licensor, to make, use, sell and offer for sale the +Original Work and Derivative Works. + +3) Grant of Source Code License. The term "Source Code" means the preferred +form of the Original Work for making modifications to it and all available +documentation describing how to modify the Original Work. Licensor hereby +agrees to provide a machine-readable copy of the Source Code of the Original +Work along with each copy of the Original Work that Licensor distributes. +Licensor reserves the right to satisfy this obligation by placing a +machine-readable copy of the Source Code in an information repository +reasonably calculated to permit inexpensive and convenient access by You for as +long as Licensor continues to distribute the Original Work, and by publishing +the address of that information repository in a notice immediately following +the copyright notice that applies to the Original Work. 
+ +4) Exclusions From License Grant. Neither the names of Licensor, nor the names +of any contributors to the Original Work, nor any of their trademarks or +service marks, may be used to endorse or promote products derived from this +Original Work without express prior written permission of the Licensor. Nothing +in this License shall be deemed to grant any rights to trademarks, copyrights, +patents, trade secrets or any other intellectual property of Licensor except as +expressly stated herein. No patent license is granted to make, use, sell or +offer to sell embodiments of any patent claims other than the licensed claims +defined in Section 2. No right is granted to the trademarks of Licensor even if +such marks are included in the Original Work. Nothing in this License shall be +interpreted to prohibit Licensor from licensing under different terms from this +License any Original Work that Licensor otherwise would have a right to +license. + +5) This section intentionally omitted. + +6) Attribution Rights. You must retain, in the Source Code of any Derivative +Works that You create, all copyright, patent or trademark notices from the +Source Code of the Original Work, as well as any notices of licensing and any +descriptive text identified therein as an "Attribution Notice." You must cause +the Source Code for any Derivative Works that You create to carry a prominent +Attribution Notice reasonably calculated to inform recipients that You have +modified the Original Work. + +7) Warranty of Provenance and Disclaimer of Warranty. Licensor warrants that +the copyright in and to the Original Work and the patent rights granted herein +by Licensor are owned by the Licensor or are sublicensed to You under the terms +of this License with the permission of the contributor(s) of those copyrights +and patent rights. 
Except as expressly stated in the immediately proceeding +sentence, the Original Work is provided under this License on an "AS IS" BASIS +and WITHOUT WARRANTY, either express or implied, including, without limitation, +the warranties of NON-INFRINGEMENT, MERCHANTABILITY or FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY OF THE ORIGINAL WORK IS WITH YOU. +This DISCLAIMER OF WARRANTY constitutes an essential part of this License. No +license to Original Work is granted hereunder except under this disclaimer. + +8) Limitation of Liability. Under no circumstances and under no legal theory, +whether in tort (including negligence), contract, or otherwise, shall the +Licensor be liable to any person for any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License +or the use of the Original Work including, without limitation, damages for loss +of goodwill, work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses. This limitation of liability shall not +apply to liability for death or personal injury resulting from Licensor's +negligence to the extent applicable law prohibits such limitation. Some +jurisdictions do not allow the exclusion or limitation of incidental or +consequential damages, so this exclusion and limitation may not apply to You. + +9) Acceptance and Termination. If You distribute copies of the Original Work or +a Derivative Work, You must make a reasonable effort under the circumstances to +obtain the express assent of recipients to the terms of this License. 
Nothing +else but this License (or another written agreement between Licensor and You) +grants You permission to create Derivative Works based upon the Original Work +or to exercise any of the rights granted in Section 1 herein, and any attempt +to do so except under the terms of this License (or another written agreement +between Licensor and You) is expressly prohibited by U.S. copyright law, the +equivalent laws of other countries, and by international treaty. Therefore, by +exercising any of the rights granted to You in Section 1 herein, You indicate +Your acceptance of this License and all of its terms and conditions. + +10) Termination for Patent Action. This License shall terminate automatically +and You may no longer exercise any of the rights granted to You by this License +as of the date You commence an action, including a cross-claim or counterclaim, +against Licensor or any licensee alleging that the Original Work infringes a +patent. This termination provision shall not apply for an action alleging +patent infringement by combinations of the Original Work with other software or +hardware. + +11) Jurisdiction, Venue and Governing Law. Any action or suit relating to this +License may be brought only in the courts of a jurisdiction wherein the +Licensor resides or in which Licensor conducts its primary business, and under +the laws of that jurisdiction excluding its conflict-of-law provisions. The +application of the United Nations Convention on Contracts for the International +Sale of Goods is expressly excluded. Any use of the Original Work outside the +scope of this License or after its termination shall be subject to the +requirements and penalties of the U.S. Copyright Act, 17 U.S.C. § 101 et +seq., the equivalent laws of other countries, and international treaty. This +section shall survive the termination of this License. + +12) Attorneys Fees. 
In any action to enforce the terms of this License or +seeking damages relating thereto, the prevailing party shall be entitled to +recover its costs and expenses, including, without limitation, reasonable +attorneys' fees and costs incurred in connection with such action, including +any appeal of such action. This section shall survive the termination of this +License. + +13) Miscellaneous. This License represents the complete agreement concerning +the subject matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent necessary to +make it enforceable. + +14) Definition of "You" in This License. "You" throughout this License, whether +in upper or lower case, means an individual or a legal entity exercising rights +under, and complying with all of the terms of, this License. For legal +entities, "You" includes any entity that controls, is controlled by, or is +under common control with you. For purposes of this definition, "control" means +(i) the power, direct or indirect, to cause the direction or management of such +entity, whether by contract or otherwise, or (ii) ownership of fifty percent +(50%) or more of the outstanding shares, or (iii) beneficial ownership of such +entity. + +15) Right to Use. You may use the Original Work in all ways not otherwise +restricted or conditioned by this License or by law, and Licensor promises not +to interfere with or be responsible for such uses by You. + +This license is Copyright (C) 2003-2004 Lawrence E. Rosen. All rights reserved. +Permission is hereby granted to copy and distribute this license without +modification. This license may not be modified without the express written +permission of its copyright owner. 
+ + +json-schema-traverse +MIT +MIT License + +Copyright (c) 2017 Evgeny Poberezkin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +json-stringify-safe +ISC +The ISC License + +Copyright (c) Isaac Z. Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ + +jsonparse +MIT +The MIT License + +Copyright (c) 2012 Tim Caswell + +Permission is hereby granted, free of charge, +to any person obtaining a copy of this software and +associated documentation files (the "Software"), to +deal in the Software without restriction, including +without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom +the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR +ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +jsprim +MIT +Copyright (c) 2012, Joyent, Inc. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE + + +lazystream +MIT +Copyright (c) 2013 J. Pommerening, contributors. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + + + +lodash.defaults +MIT +Copyright jQuery Foundation and other contributors + +Based on Underscore.js, copyright Jeremy Ashkenas, +DocumentCloud and Investigative Reporters & Editors + +This software consists of voluntary contributions made by many +individuals. 
For exact contribution history, see the revision history +available at https://github.com/lodash/lodash + +The following license applies to all parts of this software except as +documented below: + +==== + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +==== + +Copyright and related rights for sample code are waived via CC0. Sample +code is defined as all source code displayed within the prose of the +documentation. + +CC0: http://creativecommons.org/publicdomain/zero/1.0/ + +==== + +Files located in the node_modules and vendor directories are externally +maintained libraries used by this software which have their own +licenses; we recommend you read them, as their terms may differ from the +terms above. + + +lodash.difference +MIT +Copyright jQuery Foundation and other contributors + +Based on Underscore.js, copyright Jeremy Ashkenas, +DocumentCloud and Investigative Reporters & Editors + +This software consists of voluntary contributions made by many +individuals. 
For exact contribution history, see the revision history +available at https://github.com/lodash/lodash + +The following license applies to all parts of this software except as +documented below: + +==== + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +==== + +Copyright and related rights for sample code are waived via CC0. Sample +code is defined as all source code displayed within the prose of the +documentation. + +CC0: http://creativecommons.org/publicdomain/zero/1.0/ + +==== + +Files located in the node_modules and vendor directories are externally +maintained libraries used by this software which have their own +licenses; we recommend you read them, as their terms may differ from the +terms above. + + +lodash.flatten +MIT +Copyright jQuery Foundation and other contributors + +Based on Underscore.js, copyright Jeremy Ashkenas, +DocumentCloud and Investigative Reporters & Editors + +This software consists of voluntary contributions made by many +individuals. 
For exact contribution history, see the revision history +available at https://github.com/lodash/lodash + +The following license applies to all parts of this software except as +documented below: + +==== + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +==== + +Copyright and related rights for sample code are waived via CC0. Sample +code is defined as all source code displayed within the prose of the +documentation. + +CC0: http://creativecommons.org/publicdomain/zero/1.0/ + +==== + +Files located in the node_modules and vendor directories are externally +maintained libraries used by this software which have their own +licenses; we recommend you read them, as their terms may differ from the +terms above. + + +lodash.isplainobject +MIT +Copyright jQuery Foundation and other contributors + +Based on Underscore.js, copyright Jeremy Ashkenas, +DocumentCloud and Investigative Reporters & Editors + +This software consists of voluntary contributions made by many +individuals. 
For exact contribution history, see the revision history +available at https://github.com/lodash/lodash + +The following license applies to all parts of this software except as +documented below: + +==== + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +==== + +Copyright and related rights for sample code are waived via CC0. Sample +code is defined as all source code displayed within the prose of the +documentation. + +CC0: http://creativecommons.org/publicdomain/zero/1.0/ + +==== + +Files located in the node_modules and vendor directories are externally +maintained libraries used by this software which have their own +licenses; we recommend you read them, as their terms may differ from the +terms above. + + +lodash.union +MIT +Copyright jQuery Foundation and other contributors + +Based on Underscore.js, copyright Jeremy Ashkenas, +DocumentCloud and Investigative Reporters & Editors + +This software consists of voluntary contributions made by many +individuals. 
For exact contribution history, see the revision history +available at https://github.com/lodash/lodash + +The following license applies to all parts of this software except as +documented below: + +==== + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +==== + +Copyright and related rights for sample code are waived via CC0. Sample +code is defined as all source code displayed within the prose of the +documentation. + +CC0: http://creativecommons.org/publicdomain/zero/1.0/ + +==== + +Files located in the node_modules and vendor directories are externally +maintained libraries used by this software which have their own +licenses; we recommend you read them, as their terms may differ from the +terms above. 
+ + +mime-db +MIT + +The MIT License (MIT) + +Copyright (c) 2014 Jonathan Ong me@jongleberry.com + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +mime-types +MIT +(The MIT License) + +Copyright (c) 2014 Jonathan Ong +Copyright (c) 2015 Douglas Christopher Wilson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +minimatch +ISC +The ISC License + +Copyright (c) Isaac Z. Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +normalize-path +MIT +The MIT License (MIT) + +Copyright (c) 2014-2018, Jon Schlinkert. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +oauth-sign +Apache-2.0 +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
+ +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of this License; and + +You must cause any modified files to carry prominent notices stating that You changed the files; and + +You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +once +ISC +The ISC License + +Copyright (c) Isaac Z. Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +path-is-absolute +MIT +The MIT License (MIT) + +Copyright (c) Sindre Sorhus (sindresorhus.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ + +performance-now +MIT +Copyright (c) 2013 Braveg1rl + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +process-nextick-args +MIT +# Copyright (c) 2015 Calvin Metcalf + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE.** + + +psl +MIT +The MIT License (MIT) + +Copyright (c) 2017 Lupo Montero lupomontero@gmail.com + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +q +MIT +Copyright 2009–2017 Kristopher Michael Kowal. All rights reserved. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. + + +qs +BSD-3-Clause +BSD 3-Clause License + +Copyright (c) 2014, Nathan LaFreniere and other [contributors](https://github.com/ljharb/qs/graphs/contributors) +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +readable-stream +MIT +Copyright Joyent, Inc. and other Node contributors. All rights reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. 
+ + +readdir-glob +Apache-2.0 + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 Yann Armelin + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +request +Apache-2.0 +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of this License; and + +You must cause any modified files to carry prominent notices stating that You changed the files; and + +You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +safe-buffer +MIT +The MIT License (MIT) + +Copyright (c) Feross Aboukhadijeh + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +safer-buffer +MIT +MIT License + +Copyright (c) 2018 Nikita Skovoroda + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +sax +BSD +Copyright (c) Isaac Z. Schlueter ("Author") +All rights reserved. + +The BSD License + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +The file "examples/strict.dtd" is licensed by the W3C and used according +to the terms of the W3C SOFTWARE NOTICE AND LICENSE. See LICENSE-W3C.html +for details. 
+ + +sshpk +MIT +Copyright Joyent, Inc. All rights reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. + + +string_decoder +MIT +Copyright Joyent, Inc. and other Node contributors. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to permit +persons to whom the Software is furnished to do so, subject to the +following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN +NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +USE OR OTHER DEALINGS IN THE SOFTWARE. + + +tar-stream +MIT +The MIT License (MIT) + +Copyright (c) 2014 Mathias Buus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +tough-cookie +BSD-3-Clause +Copyright (c) 2015, Salesforce.com, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. 
Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +tunnel +MIT +The MIT License (MIT) + +Copyright (c) 2012 Koichi Kobayashi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +tunnel-agent +Apache-2.0 +Apache License + +Version 2.0, January 2004 + +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
+ +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of this License; and + +You must cause any modified files to carry prominent notices stating that You changed the files; and + +You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +tweetnacl +Unlicense +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to + + +typed-rest-client +MIT +Typed Rest Client for Node.js + +Copyright (c) Microsoft Corporation + +All rights reserved. + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT +LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +/* Node-SMB/ntlm + * https://github.com/Node-SMB/ntlm + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * Copyright (C) 2012 Joshua M. 
Clulow + */ + + +underscore +MIT +Copyright (c) 2009-2021 Jeremy Ashkenas, Julian Gonggrijp, and DocumentCloud and Investigative Reporters & Editors + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + + +uri-js +BSD-2-Clause + +util-deprecate +MIT +(The MIT License) + +Copyright (c) 2014 Nathan Rajlich + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + + +uuid +MIT +The MIT License (MIT) + +Copyright (c) 2010-2016 Robert Kieffer and other contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + +validator +MIT +Copyright (c) 2018 Chris O'Hara + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +verror +MIT +Copyright (c) 2016, Joyent, Inc. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE + + +wrappy +ISC +The ISC License + +Copyright (c) Isaac Z. Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +xml2js +Copyright 2010, 2011, 2012, 2013. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. + + +xmlbuilder +MIT +The MIT License (MIT) + +Copyright (c) 2013 Ozgur Ozcitak + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +zip-stream +MIT +Copyright (c) 2014 Chris Talkington, contributors. 
+ +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/dist/index.js b/dist/index.js index e69de29b..85ba961a 100644 --- a/dist/index.js +++ b/dist/index.js @@ -0,0 +1,122788 @@ +/******/ (() => { // webpackBootstrap +/******/ var __webpack_modules__ = ({ + +/***/ 3537: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? 
(function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const AzureRestClient_1 = __nccwpck_require__(66120); +const core = __importStar(__nccwpck_require__(87454)); +var apiVersion = "2020-02-14"; +class ImageBuilderClient { + constructor(resourceAuthorizer, taskParameters) { + this._client = new AzureRestClient_1.ServiceClient(resourceAuthorizer); + this._taskParameters = taskParameters; + } + getTemplateId(templateName, subscriptionId) { + return __awaiter(this, void 0, void 0, function* () { + let httpRequest = { + method: 'GET', + uri: this._client.getRequestUri(`/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imagetemplates/{imageTemplateName}`, { '{subscriptionId}': subscriptionId, '{resourceGroupName}': this._taskParameters.resourceGroupName, '{imageTemplateName}': templateName }, [], apiVersion) + }; + 
var resourceId = ""; + try { + var response = yield this._client.beginRequest(httpRequest); + if (response.statusCode != 200 || response.body.status == "Failed") + throw AzureRestClient_1.ToError(response); + if (response.statusCode == 200 && response.body.id) + resourceId = response.body.id; + } + catch (error) { + throw Error(`Get template call failed for template ${templateName} with error: ${JSON.stringify(error)}`); + } + return resourceId; + }); + } + putImageTemplate(template, templateName, subscriptionId) { + return __awaiter(this, void 0, void 0, function* () { + console.log("Submitting the template"); + let httpRequest = { + method: 'PUT', + uri: this._client.getRequestUri(`/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imagetemplates/{imageTemplateName}`, { '{subscriptionId}': subscriptionId, '{resourceGroupName}': this._taskParameters.resourceGroupName, '{imageTemplateName}': templateName }, [], apiVersion), + body: template + }; + try { + var response = yield this._client.beginRequest(httpRequest); + if (response.statusCode == 201) { + response = yield this.getLongRunningOperationResult(response); + } + if (response.statusCode != 200 || response.body.status == "Failed") { + throw AzureRestClient_1.ToError(response); + } + if (response.statusCode == 200 && response.body && response.body.status == "Succeeded") { + console.log("Submitted template: \n", response.body.status); + } + } + catch (error) { + throw Error(`Submit template call failed for template ${templateName} with error: ${JSON.stringify(error)}`); + } + }); + } + getRunTemplate(templateName, subscriptionId){ + var response; + return __awaiter(this, void 0, void 0, function* () { + try { + let httpRequest = { + method: 'GET', + uri: this._client.getRequestUri(`/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imagetemplates/{imageTemplateName}`, { '{subscriptionId}': 
subscriptionId, '{resourceGroupName}': this._taskParameters.resourceGroupName, '{imageTemplateName}': templateName }, [], apiVersion) + }; + response = yield this._client.beginRequest(httpRequest); + + if (response.statusCode == 202) { + response = yield this.getLongRunningOperationResult(response); + } + if (response.statusCode != 200 || response.body.status == "Failed") { + throw AzureRestClient_1.ToError(response); + } + if (response.statusCode == 200 && response.body && response.body.status == "Succeeded") { + console.log("Run template: \n", response.body.status); + } + return response + } + catch (error) { + throw Error(`Post template call failed for template ${templateName} with error: ${JSON.stringify(error)}`); + } + }); + } + runTemplate(templateName, subscriptionId, timeOutInMinutes) { + return __awaiter(this, void 0, void 0, function* () { + try { + console.log("Starting run template..."); + let httpRequest = { + method: 'POST', + uri: this._client.getRequestUri(`/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imagetemplates/{imageTemplateName}/run`, { '{subscriptionId}': subscriptionId, '{resourceGroupName}': this._taskParameters.resourceGroupName, '{imageTemplateName}': templateName }, [], apiVersion) + }; + var response = yield this._client.beginRequest(httpRequest); + + if (response.statusCode == 202) { + if (this._taskParameters.actionRunMode == "nowait"){ + console.log("Action Run Mode set to NoWait. 
Skipping wait\n"); + return + } + response = yield this.getLongRunningOperationResult(response, timeOutInMinutes, templateName, subscriptionId); + } + if (response.statusCode != 200 || response.body.status == "Failed") { + throw AzureRestClient_1.ToError(response); + } + if (response.statusCode == 200 && response.body && response.body.status == "Succeeded") { + console.log("Run template: \n", response.body.status); + } + } + catch (error) { + throw Error(`Post template call failed for template ${templateName} with error: ${JSON.stringify(error)}`); + } + }); + } + deleteTemplate(templateName, subscriptionId) { + return __awaiter(this, void 0, void 0, function* () { + try { + console.log(`Deleting template ${templateName}...`); + let httpRequest = { + method: 'DELETE', + uri: this._client.getRequestUri(`/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imagetemplates/{imageTemplateName}`, { '{subscriptionId}': subscriptionId, '{resourceGroupName}': this._taskParameters.resourceGroupName, '{imageTemplateName}': templateName }, [], apiVersion) + }; + var response = yield this._client.beginRequest(httpRequest); + if (response.statusCode == 202) { + response = yield this.getLongRunningOperationResult(response); + } + if (response.statusCode != 200 || response.body.status == "Failed") { + throw AzureRestClient_1.ToError(response); + } + if (response.statusCode == 200 && response.body && response.body.status == "Succeeded") { + console.log("Delete template: ", response.body.status); + } + } + catch (error) { + throw Error(`Delete template call failed for template ${templateName} with error: ${JSON.stringify(error)}`); + } + }); + } + getRunOutput(templateName, runOutput, subscriptionId) { + return __awaiter(this, void 0, void 0, function* () { + let httpRequest = { + method: 'GET', + uri: 
this._client.getRequestUri(`/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.VirtualMachineImages/imagetemplates/{imageTemplateName}/runOutputs/{runOutput}`, { '{subscriptionId}': subscriptionId, '{resourceGroupName}': this._taskParameters.resourceGroupName, '{imageTemplateName}': templateName, '{runOutput}': runOutput }, [], apiVersion) + }; + var output = ""; + try { + var response = yield this._client.beginRequest(httpRequest); + if (response.statusCode != 200 || response.body.status == "Failed") + throw AzureRestClient_1.ToError(response); + if (response.statusCode == 200 && response.body) { + if (response.body && response.body.properties.artifactId) + output = response.body.properties.artifactId; + else if (response.body && response.body.properties.artifactUri) + output = response.body.properties.artifactUri; + else + console.log(`Error in parsing response.body -- ${response.body}.`); + } + } + catch (error) { + throw Error(`Get runOutput call failed for template ${templateName} for ${runOutput} with error: ${JSON.stringify(error)}`); + } + return output; + }); + } + getLongRunningOperationResult(response, timeoutInMinutes, templateName, subscriptionId) { + var response; + return __awaiter(this, void 0, void 0, function* () { + var longRunningOperationRetryTimeout = !!timeoutInMinutes ? 
timeoutInMinutes : 0; + timeoutInMinutes = timeoutInMinutes || longRunningOperationRetryTimeout; + var timeout = new Date().getTime() + timeoutInMinutes * 60 * 1000; + var waitIndefinitely = timeoutInMinutes == 0; + var requestURI = response.headers["azure-asyncoperation"] || response.headers["location"]; + let httpRequest = { + method: 'GET', + uri: requestURI + }; + if (!httpRequest.uri) { + throw new Error("InvalidResponseLongRunningOperation"); + } + if (!httpRequest.uri) { + console.log("error in uri " + httpRequest.uri); + } + var sleepDuration = 15; + while (true) { + response = yield this._client.beginRequest(httpRequest); + if (response.statusCode === 202 || (response.body && (response.body.status == "Accepted" || response.body.status == "Running" || response.body.status == "InProgress"))) { + if (response.body && response.body.status) { + core.debug(response.body.status); + } + if (!waitIndefinitely && timeout < new Date().getTime()) { + throw Error(`error in url`); + } + if (this._taskParameters.actionRunMode == "buildonly" && templateName && subscriptionId){ + try{ + let runTemplate_result = null + try{ + runTemplate_result = yield this.getRunTemplate(templateName, subscriptionId).then(result=> (runTemplate_result = result)) + + if (!runTemplate_result.body.properties && !runTemplate_result.body.properties.lastRunStatus){ + if (runTemplate_result.properties.lastRunStatus.runSubState.toLowerCase() == "distributing"){ + console.log("Template is distributing set to break") + response = runTemplate_result + return response + } + } + } + catch(err){ + console.log(err) + } + } + catch(err){ + console.log(err) + } + } + + yield this.sleepFor(sleepDuration); + } + else { + break; + } + } + return response; + }); + } + sleepFor(sleepDurationInSeconds) { + return new Promise((resolve, reject) => { + setTimeout(resolve, sleepDurationInSeconds * 1000); + }); + } +} +exports["default"] = ImageBuilderClient; + + +/***/ }), + +/***/ 55399: +/***/ 
(function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const Utils_1 = __importStar(__nccwpck_require__(17205)); +const AzureRestClient_1 = __nccwpck_require__(66120); +var defaultTemplate = ` +{ + "location": "", + "identity": { + "type": "UserAssigned", + "userAssignedIdentities": { + "IDENTITY": {} + } + }, + "properties": { + "source": SOURCE, + "customize": [CUSTOMIZE], + "distribute": [DISTRIBUTE], + "vmProfile": { + "vmSize": "VM_SIZE" + } + } + } +`; +var templateSource = new Map([ + ["managedimage", `{"type": "ManagedImage", "imageId": "IMAGE_ID"}`], + ["sharedimagegallery", `{"type": "SharedImageVersion", "imageVersionId": "IMAGE_ID"}`], + ["platformimage", `{"type": "PlatformImage", "publisher": "PUBLISHER_NAME", "offer": "OFFER_NAME","sku": "SKU_NAME", "version": "VERSION"}`] +]); +var templateCustomizer = new Map([ + ["shell", `{"type": "File", "name": "aibaction_file_copy", "sourceUri": "", "destination": ""},{"type": "Shell", "name": "aibaction_inline", "inline":[]}`], + ["shellInline", `{"type": "Shell", "name": "aibaction_inline", "inline":[]}`], + ["powershell", `{"type": "PowerShell", "name": "aibaction_inline", "inline":[]}`], + ["windowsUpdate", `{"type": "PowerShell", "name": "5minWait_is_needed_before_windowsUpdate", "inline":["Start-Sleep -Seconds 300"]},{"type": "WindowsUpdate", "searchCriteria": "IsInstalled=0", "filters": ["exclude:$_.Title -like '*Preview*'", "include:$true"]}`] +]); +var templateDistribute = new Map([ + ["managedimage", `{"type": "ManagedImage", "imageId": "IMAGE_ID", "location": "", "runOutputName": "ManagedImage_distribute", "artifactTags": {"RunURL": "URL", "GitHubRepo": "GITHUB_REPO", "GithubCommit": "GITHUB_COMMIT"}}`], + ["sharedimagegallery", `{"type": "SharedImage", "galleryImageId": "IMAGE_ID", "replicationRegions": [], "runOutputName": 
"SharedImage_distribute", "artifactTags": {"RunURL": "URL", "GitHubRepo": "GITHUB_REPO", "GithubCommit": "GITHUB_COMMIT"}}`], + ["vhd", `{"type": "VHD", "runOutputName": "VHD_distribute"}`] +]); +class BuildTemplate { + constructor(resourceAuthorizer, taskParameters) { + try { + this._taskParameters = taskParameters; + this._client = new AzureRestClient_1.ServiceClient(resourceAuthorizer); + } + catch (error) { + throw Error(error); + } + } + getLatestVersion(subscriptionId) { + return __awaiter(this, void 0, void 0, function* () { + let httpRequest = { + method: 'GET', + uri: this._client.getRequestUri(`/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions`, { '{subscriptionId}': subscriptionId, '{location}': this._taskParameters.location, '{publisherName}': this._taskParameters.imagePublisher, '{offer}': this._taskParameters.imageOffer, '{skus}': this._taskParameters.imageSku }, ["$orderby=name%20desc", "$top=1"], '2018-06-01') + }; + var latestVersion = ""; + try { + var response = yield this._client.beginRequest(httpRequest); + if (response.statusCode != 200 || response.body.statusCode == "Failed") { + throw Error(response.statusCode.toString()); + } + if (response.statusCode == 200 && response.body) + latestVersion = response.body[0].name; + } + catch (error) { + throw Error(`failed to get latest image version: request uri ${httpRequest.uri}: ${error}`); + } + return latestVersion; + }); + } + getTemplate(blobUrl, imgBuilderId, subscriptionId) { + return __awaiter(this, void 0, void 0, function* () { + var template = defaultTemplate; + template = template.replace("IDENTITY", imgBuilderId); + template = template.replace("VM_SIZE", this._taskParameters.vmSize); + template = template.replace("SOURCE", templateSource.get(this._taskParameters.sourceImageType.toLowerCase())); + template = template.replace("DISTRIBUTE", 
templateDistribute.get(this._taskParameters.distributeType.toLowerCase())); + var customizers; + if (Utils_1.default.IsEqual(this._taskParameters.provisioner, "shell") && (this._taskParameters.customizerSource == undefined || this._taskParameters.customizerSource.length == 0)) { + customizers = templateCustomizer.get("shellInline"); + } + else { + customizers = templateCustomizer.get(this._taskParameters.provisioner); + } + if (Utils_1.default.IsEqual(this._taskParameters.provisioner, "powershell") && this._taskParameters.windowsUpdateProvisioner) + customizers = customizers + "," + templateCustomizer.get("windowsUpdate"); + template = template.replace("CUSTOMIZE", customizers); + var templateJson = JSON.parse(template); + templateJson.location = this._taskParameters.location; + if (Utils_1.default.IsEqual(templateJson.properties.source.type, "PlatformImage")) { + templateJson.properties.source.publisher = this._taskParameters.imagePublisher; + templateJson.properties.source.offer = this._taskParameters.imageOffer; + templateJson.properties.source.sku = this._taskParameters.imageSku; + if (Utils_1.default.IsEqual(this._taskParameters.baseImageVersion, "latest")) + templateJson.properties.source.version = yield this.getLatestVersion(subscriptionId); + else + templateJson.properties.source.version = this._taskParameters.baseImageVersion; + } + else if (Utils_1.default.IsEqual(templateJson.properties.source.type, "ManagedImage")) + templateJson.properties.source.imageId = this._taskParameters.sourceResourceId; + else + templateJson.properties.source.imageVersionId = this._taskParameters.imageVersionId; + // customize + if (Utils_1.default.IsEqual(this._taskParameters.provisioner, "shell")) { + var inline = "#\n"; + if (!(this._taskParameters.buildFolder == "")) { + var packageName = `/tmp/${this._taskParameters.buildFolder}`; + templateJson.properties.customize[0].sourceUri = blobUrl; + templateJson.properties.customize[0].destination = `${packageName}.tar.gz`; + 
inline += `mkdir -p ${packageName}\n`; + inline += `sudo tar -xzvf ${templateJson.properties.customize[0].destination} -C ${packageName}\n`; + if (this._taskParameters.inlineScript) + inline += `${this._taskParameters.inlineScript}\n`; + templateJson.properties.customize[1].inline = inline.split("\n"); + } + else { + if (this._taskParameters.inlineScript) + inline += `${this._taskParameters.inlineScript}\n`; + templateJson.properties.customize[0].inline = inline.split("\n"); + } + } + else if (Utils_1.default.IsEqual(this._taskParameters.provisioner, "powershell")) { + var inline = ""; + if (!(this._taskParameters.buildFolder == "")) { + var packageName = "c:\\" + this._taskParameters.buildFolder; + inline += `Invoke-WebRequest -Uri '${blobUrl}' -OutFile ${packageName}.zip -UseBasicParsing\n`; + inline += `Expand-Archive -Path ${packageName}.zip -DestinationPath ${packageName}\n`; + } + if (this._taskParameters.inlineScript) + inline += `${this._taskParameters.inlineScript}\n`; + templateJson.properties.customize[0].inline = inline.split("\n"); + } + if (Utils_1.default.IsEqual(templateJson.properties.distribute[0].type, "ManagedImage")) { + if (this._taskParameters.imageIdForDistribute == "" || this._taskParameters.imageIdForDistribute == undefined) { + var imageDefn = "mi_" + Utils_1.getCurrentTime(); + templateJson.properties.distribute[0].imageId = `/subscriptions/${subscriptionId}/resourceGroups/${this._taskParameters.resourceGroupName}/providers/Microsoft.Compute/images/${imageDefn}`; + } + else { + templateJson.properties.distribute[0].imageId = this._taskParameters.imageIdForDistribute; + } + templateJson.properties.distribute[0].location = this._taskParameters.managedImageLocation; + } + if (Utils_1.default.IsEqual(templateJson.properties.distribute[0].type, "SharedImage")) { + templateJson.properties.distribute[0].galleryImageId = this._taskParameters.galleryImageId; + var regions = this._taskParameters.replicationRegions.split(","); + 
templateJson.properties.distribute[0].replicationRegions = regions; + } + if (Utils_1.default.IsEqual(templateJson.properties.distribute[0].type, "SharedImage") || Utils_1.default.IsEqual(templateJson.properties.distribute[0].type, "ManagedImage")) { + templateJson.properties.distribute[0].artifactTags.RunURL = process.env.GITHUB_SERVER_URL + "/" + process.env.GITHUB_REPOSITORY + "/actions/runs/" + process.env.GITHUB_RUN_ID; + templateJson.properties.distribute[0].artifactTags.GitHubRepo = process.env.GITHUB_REPOSITORY; + templateJson.properties.distribute[0].artifactTags.GithubCommit = process.env.GITHUB_SHA; + if (this._taskParameters.distImageTags !== "" && this._taskParameters.distImageTags !== undefined) { + var distImageTags = this._taskParameters.distImageTags.split(","); + for (var i = 0; i < distImageTags.length; i++) { + var distImageTag = distImageTags[i].split(":"); + templateJson.properties.distribute[0].artifactTags[distImageTag[0]] = distImageTag[1]; + } + } + } + return templateJson; + }); + } + addUserCustomisationIfNeeded(blobUrl) { + let json = JSON.parse(this._taskParameters.templateJsonFromUser); + let customizers = json.properties.customize; + // add customization for custom source + let fileCustomizer; + if (!!this._taskParameters.customizerSource) { + let windowsUpdateCustomizer; + if (Utils_1.default.IsEqual(this._taskParameters.provisioner, "powershell") && this._taskParameters.windowsUpdateProvisioner) { + windowsUpdateCustomizer = JSON.parse("[" + templateCustomizer.get("windowsUpdate") + "]"); + for (var i = windowsUpdateCustomizer.length - 1; i >= 0; i--) { + customizers.unshift(windowsUpdateCustomizer[i]); + } + } + fileCustomizer = JSON.parse("[" + templateCustomizer.get(this._taskParameters.provisioner) + "]"); + for (var i = fileCustomizer.length - 1; i >= 0; i--) { + customizers.unshift(fileCustomizer[i]); + } + json.properties.customize = customizers; + if (Utils_1.default.IsEqual(this._taskParameters.provisioner, "shell")) { + 
var inline = "#\n"; + if (!(this._taskParameters.buildFolder == "")) { + var packageName = `/tmp/${this._taskParameters.buildFolder}`; + json.properties.customize[0].sourceUri = blobUrl; + json.properties.customize[0].destination = `${packageName}.tar.gz`; + inline += `mkdir -p ${packageName}\n`; + inline += `sudo tar -xzvf ${json.properties.customize[0].destination} -C ${packageName}\n`; + } + if (this._taskParameters.inlineScript) + inline += `${this._taskParameters.inlineScript}\n`; + json.properties.customize[1].inline = inline.split("\n"); + } + else if (Utils_1.default.IsEqual(this._taskParameters.provisioner, "powershell")) { + var inline = ""; + if (!(this._taskParameters.buildFolder == "")) { + var packageName = "c:\\" + this._taskParameters.buildFolder; + inline += `Invoke-WebRequest -Uri '${blobUrl}' -OutFile ${packageName}.zip -UseBasicParsing\n`; + inline += `Expand-Archive -Path ${packageName}.zip -DestinationPath ${packageName}\n`; + } + if (this._taskParameters.inlineScript) + inline += `${this._taskParameters.inlineScript}\n`; + json.properties.customize[0].inline = inline.split("\n"); + } + } + json.properties.customize = customizers; + return json; + } +} +exports["default"] = BuildTemplate; + + +/***/ }), + +/***/ 44871: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? 
(function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? 
mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const Q = __nccwpck_require__(52857); +const path = __nccwpck_require__(71017); +const core = __importStar(__nccwpck_require__(87454)); +const exec = __importStar(__nccwpck_require__(33683)); +const io = __importStar(__nccwpck_require__(27692)); +const TaskParameters_1 = __importDefault(__nccwpck_require__(34380)); +const Utils_1 = __nccwpck_require__(17205); +const AzureImageBuilderClient_1 = __importDefault(__nccwpck_require__(3537)); +const BuildTemplate_1 = __importDefault(__nccwpck_require__(55399)); +const Util = __nccwpck_require__(73837); +const Utils_2 = __importDefault(__nccwpck_require__(17205)); +var fs = __nccwpck_require__(57147); +var archiver = __nccwpck_require__(73882); +const constants = __importStar(__nccwpck_require__(67771)); +const AzureRestClient_1 = __nccwpck_require__(66120); +var azure = __nccwpck_require__(10732); +var azPath; +var storageAccountExists = false; +class ImageBuilder { + constructor(resourceAuthorizer) { + this.isVhdDistribute = false; + this.templateName = ""; + this.storageAccount = ""; + this.containerName = ""; + this.idenityName = ""; + this.imgBuilderTemplateExists = false; + this.accountkeys = ""; + try { + this._taskParameters = new TaskParameters_1.default(); + this._buildTemplate = new BuildTemplate_1.default(resourceAuthorizer, this._taskParameters); + this._aibClient = new AzureImageBuilderClient_1.default(resourceAuthorizer, this._taskParameters); + this._client = new AzureRestClient_1.ServiceClient(resourceAuthorizer); + this.idenityName = this._taskParameters.managedIdentity; + } + catch (error) { + throw (`Error happened while initializing Image builder: ${error}`); + } + } + execute() { + return __awaiter(this, void 0, void 0, function* () { + try { + azPath = yield io.which("az", true); + core.debug("Az module path: " + azPath); + var outStream = ''; + yield this.executeAzCliCommand("--version"); + yield 
this.registerFeatures(); + //GENERAL INPUTS + outStream = yield this.executeAzCliCommand("account show"); + var subscriptionId = JSON.parse(`${outStream}`).id.toString(); + var isCreateBlob = false; + var imgBuilderId = ""; + if (this._taskParameters.customizerSource != undefined && this._taskParameters.customizerSource != "") { + isCreateBlob = true; + } + if (!this._taskParameters.isTemplateJsonProvided) { + if (this.idenityName.startsWith("/subscriptions/")) { + imgBuilderId = this.idenityName; + } + else { + imgBuilderId = `/subscriptions/${subscriptionId}/resourcegroups/${this._taskParameters.resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${this.idenityName}`; + } + } + else { + var template = JSON.parse(this._taskParameters.templateJsonFromUser); + this._taskParameters.location = template.location; + } + + this.templateName = this.getTemplateName(); + var runOutputName = this.getRunoutputName(); + + console.log("Using Managed Identity " + this.idenityName); + var blobUrl = ""; + if (isCreateBlob) { + //create a blob service + yield this.createStorageAccount(this.templateName); + this._blobService = azure.createBlobService(this.storageAccount, this.accountkeys); + this.containerName = constants.containerName; + var blobName = this._taskParameters.buildFolder + "/" + process.env.GITHUB_RUN_ID + "/" + this._taskParameters.buildFolder + `_${Utils_1.getCurrentTime()}`; + if (Utils_2.default.IsEqual(this._taskParameters.provisioner, "powershell")) + blobName = blobName + '.zip'; + else + blobName = blobName + '.tar.gz'; + blobUrl = yield this.uploadPackage(this.containerName, blobName); + core.debug("Blob Url: " + blobUrl); + } + let templateJson = ""; + if (!this._taskParameters.isTemplateJsonProvided) { + templateJson = yield this._buildTemplate.getTemplate(blobUrl, imgBuilderId, subscriptionId); + } + else { + templateJson = this._buildTemplate.addUserCustomisationIfNeeded(blobUrl); + } + 
templateJson.properties.distribute[0].runOutputName = runOutputName; + this.isVhdDistribute = templateJson.properties.distribute[0].type == "VHD"; + var templateStr = JSON.stringify(templateJson, null, 2); + console.log("Template Name: " + this.templateName); + console.log("Template: \n" + templateStr); + yield this._aibClient.putImageTemplate(templateStr, this.templateName, subscriptionId); + this.imgBuilderTemplateExists = true; + yield this._aibClient.runTemplate(this.templateName, subscriptionId, this._taskParameters.buildTimeoutInMinutes); + var templateID = yield this._aibClient.getTemplateId(this.templateName, subscriptionId); + + if (this._taskParameters.actionRunMode !== "nowait"){ + var out = yield this._aibClient.getRunOutput(this.templateName, runOutputName, subscriptionId); + var imagebuilderRunStatus = "failed"; + core.setOutput('templateName', this.templateName); + core.setOutput('templateId', templateID); + core.setOutput('run-output-name', runOutputName); + if (out) { + core.setOutput('custom-image-uri', out); + core.setOutput('imagebuilder-run-status', "succeeded"); + imagebuilderRunStatus = "succeeded"; + } + } + else{ + out = "" + core.setOutput('custom-image-uri', out); + core.setOutput('imagebuilder-run-status', "skipped"); + imagebuilderRunStatus = "skipped"; + } + + if (Utils_2.default.IsEqual(templateJson.properties.source.type, "PlatformImage")) { + core.setOutput('pirPublisher', templateJson.properties.source.publisher); + core.setOutput('pirOffer', templateJson.properties.source.offer); + core.setOutput('pirSku', templateJson.properties.source.sku); + core.setOutput('pirVersion', templateJson.properties.source.version); + } + + console.log("=============================================================================="); + console.log("## task output variables ##"); + console.log("$(imagebuilder-run-status) = ", imagebuilderRunStatus); + console.log("$(imageUri) = ", out); + if (this.isVhdDistribute) { + console.log("$(templateName) = ", 
this.templateName); + console.log("$(templateId) = ", templateID); + } + console.log("=============================================================================="); + } + catch (error) { + throw error; + } + finally { + var outStream = yield this.executeAzCliCommand(`group exists -n ${this._taskParameters.resourceGroupName}`); + if (outStream) { + if (imagebuilderRunStatus != "failed" && (this._taskParameters.actionRunMode == "nowait")){ + console.log("skipping cleanup action run mode set to nowait") + return + } + this.cleanup(subscriptionId); + } + } + }); + } + createStorageAccount(templateName) { + return __awaiter(this, void 0, void 0, function* () { + this.storageAccount = Util.format('%s%s', constants.storageAccountName, Utils_1.getCurrentTime()); + yield this.executeAzCliCommand(`storage account create --name "${this.storageAccount}" --resource-group "${this._taskParameters.resourceGroupName}" --location "${this._taskParameters.location}" --sku Standard_RAGRS --tags "imageTemplateName=${templateName}"`); + core.debug("Created storage account " + this.storageAccount); + var outStream = yield this.executeAzCliCommand(`storage account keys list -g "${this._taskParameters.resourceGroupName}" -n "${this.storageAccount}"`); + this.accountkeys = JSON.parse(`${outStream}`)[0].value; + storageAccountExists = true; + }); + } + registerFeatures() { + return __awaiter(this, void 0, void 0, function* () { + var outStream = yield this.executeAzCliCommand(`feature show --namespace Microsoft.VirtualMachineImages --name VirtualMachineTemplatePreview`); + if (JSON.parse(outStream) && !Utils_2.default.IsEqual(JSON.parse(outStream).properties.state, "Registered")) { + core.info("Registering Microsoft.VirtualMachineImages"); + yield this.executeAzCliCommand("feature register --namespace Microsoft.VirtualMachineImages --name VirtualMachineTemplatePreview"); + outStream = yield this.executeAzCliCommand(`feature show --namespace Microsoft.VirtualMachineImages --name 
VirtualMachineTemplatePreview`); + while (!Utils_2.default.IsEqual(JSON.parse(outStream).properties.state, "Registered")) { + this.sleepFor(1); + outStream = yield this.executeAzCliCommand(`feature show --namespace Microsoft.VirtualMachineImages --name VirtualMachineTemplatePreview`); + } + } + outStream = ''; + outStream = yield this.executeAzCliCommand(`provider show -n Microsoft.VirtualMachineImages`); + if (JSON.parse(outStream) && !Utils_2.default.IsEqual(JSON.parse(outStream).registrationState, "Registered")) { + yield this.executeAzCliCommand("provider register -n Microsoft.VirtualMachineImages"); + outStream = yield this.executeAzCliCommand(`provider show -n Microsoft.VirtualMachineImages`); + while (JSON.parse(outStream) && !Utils_2.default.IsEqual(JSON.parse(outStream).registrationState, "Registered")) { + this.sleepFor(1); + outStream = yield this.executeAzCliCommand(`provider show -n Microsoft.VirtualMachineImages`); + } + } + outStream = ''; + outStream = yield this.executeAzCliCommand(`provider show -n Microsoft.Storage`); + if (JSON.parse(outStream) && !Utils_2.default.IsEqual(JSON.parse(outStream).registrationState, "Registered")) { + core.info("Registering Microsoft.Storage"); + yield this.executeAzCliCommand("provider register -n Microsoft.Storage"); + outStream = yield this.executeAzCliCommand(`provider show -n Microsoft.Storage`); + while (JSON.parse(outStream) && !Utils_2.default.IsEqual(JSON.parse(outStream).registrationState, "Registered")) { + this.sleepFor(1); + outStream = yield this.executeAzCliCommand(`provider show -n Microsoft.Storage`); + } + } + outStream = ''; + outStream = yield this.executeAzCliCommand(`provider show -n Microsoft.Compute`); + if (JSON.parse(outStream) && !Utils_2.default.IsEqual(JSON.parse(outStream).registrationState, "Registered")) { + core.info("Registering Microsoft.Compute"); + yield this.executeAzCliCommand("provider register -n Microsoft.Compute"); + outStream = yield this.executeAzCliCommand(`provider show 
-n Microsoft.Compute`); + while (JSON.parse(outStream) && !Utils_2.default.IsEqual(JSON.parse(outStream).registrationState, "Registered")) { + this.sleepFor(1); + outStream = yield this.executeAzCliCommand(`provider show -n Microsoft.Compute`); + } + } + outStream = ''; + outStream = yield this.executeAzCliCommand(`provider show -n Microsoft.KeyVault`); + if (JSON.parse(outStream) && !Utils_2.default.IsEqual(JSON.parse(outStream).registrationState, "Registered")) { + core.info("Registering Microsoft.KeyVault"); + yield this.executeAzCliCommand("provider register -n Microsoft.KeyVault"); + outStream = yield this.executeAzCliCommand(`provider show -n Microsoft.KeyVault`); + while (JSON.parse(outStream) && !Utils_2.default.IsEqual(JSON.parse(outStream).registrationState, "Registered")) { + this.sleepFor(1); + outStream = yield this.executeAzCliCommand(`provider show -n Microsoft.KeyVault`); + } + } + }); + } + getTemplateName() { + if (this._taskParameters.isTemplateJsonProvided) { + var templateName = this.getTemplateNameFromProvidedJson(this._taskParameters.templateJsonFromUser); + return templateName == "" ? constants.imageTemplateName + Utils_1.getCurrentTime() : templateName; + } + else if (!this._taskParameters.isTemplateJsonProvided && this._taskParameters.imagebuilderTemplateName) { + return this._taskParameters.imagebuilderTemplateName; + } + return constants.imageTemplateName + Utils_1.getCurrentTime(); + } + getRunoutputName() { + var runOutputName = this._taskParameters.runOutputName; + if (runOutputName == "") { + if (this._taskParameters.isTemplateJsonProvided) { + var runOutputName = this.getRunoutputNameFromProvidedJson(this._taskParameters.templateJsonFromUser); + return runOutputName == "" ? 
this.templateName + "_" + process.env.GITHUB_RUN_ID : runOutputName; + } + else { + return this.templateName + "_" + process.env.GITHUB_RUN_ID; + } + } + return ""; + } + getTemplateNameFromProvidedJson(templateJson) { + var template = JSON.parse(templateJson); + if (template.tags && template.tags.imagebuilderTemplate) { + return template.tags.imagebuilderTemplate; + } + return ""; + } + getRunoutputNameFromProvidedJson(templateJson) { + var template = JSON.parse(templateJson); + if (template.properties.distribute && template.properties.distribute[0].runOutputName) { + return template.properties.distribute[0].runOutputName; + } + return ""; + } + uploadPackage(containerName, blobName) { + return __awaiter(this, void 0, void 0, function* () { + var defer = Q.defer(); + var archivedWebPackage; + var temp = this._generateTemporaryFile(`${process.env.GITHUB_WORKSPACE}`); + try { + if (Utils_2.default.IsEqual(this._taskParameters.provisioner, "powershell")) { + temp = temp + `.zip`; + archivedWebPackage = yield this.createArchiveTar(this._taskParameters.buildPath, temp, "zip"); + } + else { + temp = temp + `.tar.gz`; + archivedWebPackage = yield this.createArchiveTar(this._taskParameters.buildPath, temp, "tar"); + } + } + catch (error) { + defer.reject(console.log(`unable to create archive build: ${error}`)); + } + console.log(`created archive ` + archivedWebPackage); + this._blobService.createContainerIfNotExists(containerName, (error) => { + if (error) { + defer.reject(console.log(`unable to create container ${containerName} in storage account: ${error}`)); + } + //upoading package + this._blobService.createBlockBlobFromLocalFile(containerName, blobName, archivedWebPackage, (error, result) => { + if (error) { + defer.reject(console.log(`unable to create blob ${blobName} in container ${containerName} in storage account: ${error}`)); + } + //generating SAS URL + var startDate = new Date(); + var expiryDate = new Date(startDate); + 
expiryDate.setFullYear(startDate.getUTCFullYear() + 1); + startDate.setMinutes(startDate.getMinutes() - 5); + var sharedAccessPolicy = { + AccessPolicy: { + Permissions: azure.BlobUtilities.SharedAccessPermissions.READ, + Start: startDate, + Expiry: expiryDate + } + }; + var token = this._blobService.generateSharedAccessSignature(containerName, blobName, sharedAccessPolicy); + var blobUrl = this._blobService.getUrl(containerName, blobName, token); + defer.resolve(blobUrl); + }); + }); + return defer.promise; + }); + } + createArchiveTar(folderPath, targetPath, extension) { + return __awaiter(this, void 0, void 0, function* () { + var defer = Q.defer(); + console.log('Archiving ' + folderPath + ' to ' + targetPath); + var output = fs.createWriteStream(targetPath); + var archive; + if (Utils_2.default.IsEqual(extension, 'zip')) { + archive = archiver('zip', { zlib: { level: 9 } }); + } + else { + archive = archiver('tar', { + gzip: true, + gzipOptions: { + level: 1 + } + }); + } + output.on('close', function () { + console.log(archive.pointer() + ' total bytes'); + core.debug('Successfully created archive ' + targetPath); + defer.resolve(targetPath); + }); + output.on('error', function (error) { + defer.reject(error); + }); + var stats = fs.statSync(folderPath); + if (stats.isFile()) { + archive.file(folderPath, { name: this._taskParameters.buildFolder }); + } + else { + archive.glob("**", { + cwd: folderPath, + dot: true + }); + } + archive.pipe(output); + archive.finalize(); + return defer.promise; + }); + } + _generateTemporaryFile(folderPath) { + var randomString = Math.random().toString().split('.')[1]; + var tempPath = path.join(folderPath, '/temp_web_package_' + randomString); + return tempPath; + } + cleanup(subscriptionId) { + return __awaiter(this, void 0, void 0, function* () { + try { + if (!this.isVhdDistribute && this.imgBuilderTemplateExists && this._taskParameters.actionRunMode == "full") { + yield this._aibClient.deleteTemplate(this.templateName, 
subscriptionId); + console.log(`${this.templateName} got deleted`); + } + if (storageAccountExists) { + let httpRequest = { + method: 'DELETE', + uri: this._client.getRequestUri(`subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{storageAccount}`, { '{subscriptionId}': subscriptionId, '{resourceGroupName}': this._taskParameters.resourceGroupName, '{storageAccount}': this.storageAccount }, [], "2019-06-01") + }; + var response = yield this._client.beginRequest(httpRequest); + console.log("storage account " + this.storageAccount + " deleted"); + } + } + catch (error) { + console.log(`Error in cleanup: `, error); + } + }); + } + executeAzCliCommand(command) { + return __awaiter(this, void 0, void 0, function* () { + var outStream = ''; + var errorStream = ''; + var execOptions = { + outStream: new Utils_1.NullOutstreamStringWritable({ decodeStrings: false }), + listeners: { + stdout: (data) => outStream += data.toString(), + errline: (data) => { + errorStream += data; + } + } + }; + try { + yield exec.exec(`"${azPath}" ${command}`, [], execOptions); + return outStream; + } + catch (error) { + if (errorStream != '') + throw (`${errorStream} ${error}`); + else + throw (`${error}`); + } + }); + } + sleepFor(sleepDurationInSeconds) { + return new Promise((resolve) => { + setTimeout(resolve, sleepDurationInSeconds * 1000); + }); + } +} +exports["default"] = ImageBuilder; + + +/***/ }), + +/***/ 34380: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? 
(function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const path = __nccwpck_require__(71017); +const tl = __importStar(__nccwpck_require__(87454)); +const constants = __importStar(__nccwpck_require__(67771)); +const Utils_1 = __importDefault(__nccwpck_require__(17205)); +var fs = __nccwpck_require__(57147); +class TaskParameters { + constructor() { + // action inputs + this.actionRunMode = "" + // image builder inputs + this.resourceGroupName = ""; + this.location = ""; + this.isTemplateJsonProvided = false; + this.templateJsonFromUser = ''; + this.buildTimeoutInMinutes = 240; + this.vmSize = ""; + this.managedIdentity = ""; + // source + this.sourceImageType = ""; + this.sourceOSType = ""; + this.sourceResourceId = ""; + this.imageVersionId = ""; + this.baseImageVersion = ""; + this.imagePublisher = ""; + this.imageOffer = ""; + this.imageSku = ""; + //customize + this.buildPath = ""; + this.buildFolder = ""; + this.blobName = ""; + this.provisioner = ""; + this.customizerSource = ""; + this.customizerScript = ""; + this.customizerWindowsUpdate = ""; + //distribute + this.distributeType = ""; + this.imageIdForDistribute = ""; + this.replicationRegions = ""; + this.managedImageLocation = ""; + this.galleryImageId = ""; + this.distImageTags = ""; + var locations = ["eastus", "eastus2", "westcentralus", "westus", "westus2", "southcentralus", "northeurope", "westeurope", 
"southeastasia", "australiasoutheast", "australia", "uksouth", "ukwest" ]; + console.log("start reading task parameters..."); + + this.actionRunMode = tl.getInput(constants.ActionRunMode, { required: true }).toLowerCase(); + if (!this.actionRunMode){ + this.actionRunMode = "full" + } + + console.log(`Action run mode set: ${this.actionRunMode}`) + + this.imagebuilderTemplateName = tl.getInput(constants.ImageBuilderTemplateName); + if (this.imagebuilderTemplateName.indexOf(".json") > -1) { + this.isTemplateJsonProvided = true; + var data = fs.readFileSync(this.imagebuilderTemplateName, 'utf8'); + this.templateJsonFromUser = JSON.parse(JSON.stringify(data)); + } + this.resourceGroupName = tl.getInput(constants.ResourceGroupName, { required: true }); + this.buildTimeoutInMinutes = parseInt(tl.getInput(constants.BuildTimeoutInMinutes)); + this.sourceOSType = tl.getInput(constants.SourceOSType, { required: true }); + if (Utils_1.default.IsEqual(this.sourceOSType, "windows")) { + this.provisioner = "powershell"; + } + else { + this.provisioner = "shell"; + } + if (!this.isTemplateJsonProvided) { + //general inputs + this.location = tl.getInput(constants.Location, { required: true }); + if (!(locations.indexOf(this.location.toString().replace(/\s/g, "").toLowerCase()) > -1)) { + throw new Error("location not from available regions or it is not defined"); + } + this.managedIdentity = tl.getInput(constants.ManagedIdentity, { required: true }); + //vm size + this.vmSize = tl.getInput(constants.VMSize); + //source inputs + this.sourceImageType = tl.getInput(constants.SourceImageType); + var sourceImage = tl.getInput(constants.SourceImage, { required: true }); + if (Utils_1.default.IsEqual(this.sourceImageType, constants.platformImageSourceTypeImage) || Utils_1.default.IsEqual(this.sourceImageType, constants.marketPlaceSourceTypeImage)) { + this.sourceImageType = constants.platformImageSourceTypeImage; + this._extractImageDetails(sourceImage); + } + else if 
(Utils_1.default.IsEqual(this.sourceImageType, constants.managedImageSourceTypeImage)) { + this.sourceResourceId = sourceImage; + } + else { + this.imageVersionId = sourceImage; + } + } + //customize inputs + this.customizerSource = tl.getInput(constants.CustomizerSource).toString(); + if (this.customizerSource == undefined || this.customizerSource == "" || this.customizerSource == null) { + var artifactsPath = path.join(`${process.env.GITHUB_WORKSPACE}`, "workflow-artifacts"); + if (fs.existsSync(artifactsPath)) { + this.customizerSource = artifactsPath; + } + } + if (!(this.customizerSource == undefined || this.customizerSource == '' || this.customizerSource == null)) { + var bp = this.customizerSource; + var x = bp.split(path.sep); + this.buildFolder = x[x.length - 1].split(".")[0]; + this.buildPath = path.normalize(bp.trim()); + console.log("Customizer source: " + this.customizerSource); + console.log("Artifacts folder: " + this.buildFolder); + } + this.customizerScript = tl.getInput(constants.customizerScript).toString(); + this.inlineScript = tl.getInput(constants.customizerScript); + if (Utils_1.default.IsEqual(tl.getInput(constants.customizerWindowsUpdate), "true")) { + this.windowsUpdateProvisioner = true; + } + else { + this.windowsUpdateProvisioner = false; + } + //distribute inputs + if (!this.isTemplateJsonProvided) { + this.distributeType = tl.getInput(constants.DistributeType); + const distResourceId = tl.getInput(constants.DistResourceId); + const distLocation = tl.getInput(constants.DistLocation); + if (!(Utils_1.default.IsEqual(this.distributeType, "VHD") || Utils_1.default.IsEqual(this.distributeType, "ManagedImage"))) { + if (distResourceId == "" || distResourceId == undefined) { + throw Error("Distributor Resource Id is required"); + } + if (distLocation == undefined || distLocation == "") { + throw Error("Distributor Location is required"); + } + } + if (Utils_1.default.IsEqual(this.distributeType, constants.managedImageSourceTypeImage)) { + 
if (distResourceId) { + this.imageIdForDistribute = distResourceId; + } + this.managedImageLocation = this.location; + } + else if (Utils_1.default.IsEqual(this.distributeType, constants.sharedImageGallerySourceTypeImage)) { + this.galleryImageId = distResourceId; + this.replicationRegions = distLocation; + } + this.distImageTags = tl.getInput(constants.DistImageTags); + } + this.runOutputName = tl.getInput(constants.RunOutputName); + console.log("end reading parameters"); + } + _extractImageDetails(img) { + this.imagePublisher = ""; + this.imageOffer = ""; + this.imageSku = ""; + this.baseImageVersion; + var parts = img.split(':'); + if (parts.length != 4) { + throw Error("Platform Base Image should have '{publisher}:{offer}:{sku}:{version}'. All fields are required."); + } + this.imagePublisher = parts[0]; + this.imageOffer = parts[1]; + this.imageSku = parts[2]; + this.baseImageVersion = parts[3]; + } +} +exports["default"] = TaskParameters; + + +/***/ }), + +/***/ 17205: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.NullOutstreamStringWritable = exports.getCurrentTime = void 0; +const stream = __nccwpck_require__(12781); +class Utils { + static IsEqual(a, b) { + if (a !== undefined && a != null && b != null && b !== undefined) { + return a.toLowerCase() == b.toLowerCase(); + } + return false; + } +} +exports["default"] = Utils; +exports.getCurrentTime = () => { + return new Date().getTime().toString(); +}; +class NullOutstreamStringWritable extends stream.Writable { + constructor(options) { + super(options); + } + _write(data, encoding, callback) { + if (callback) { + callback(); + } + } +} +exports.NullOutstreamStringWritable = NullOutstreamStringWritable; +; + + +/***/ }), + +/***/ 67771: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.ActionRunMode = 
"action-run-mode"; +exports.imageTemplateName = exports.containerName = exports.storageAccountName = exports.DistImageTags = exports.RunOutputName = exports.DistLocation = exports.DistResourceId = exports.DistributeType = exports.customizerDestination = exports.customizerScript = exports.customizerWindowsUpdate = exports.WindowsUpdateProvisioner = exports.InlineScript = exports.CustomizerSource = exports.sharedImageGallerySourceTypeImage = exports.managedImageSourceTypeImage = exports.marketPlaceSourceTypeImage = exports.platformImageSourceTypeImage = exports.SourceImage = exports.SourceOSType = exports.SourceImageType = exports.ManagedIdentity = exports.VMSize = exports.BuildTimeoutInMinutes = exports.ImageBuilderTemplateName = exports.ResourceGroupName = exports.Location = void 0; +exports.Location = "location"; +exports.ResourceGroupName = "resource-group-name"; +exports.ImageBuilderTemplateName = "image-builder-template-name"; +exports.BuildTimeoutInMinutes = "build-timeout-in-minutes"; +exports.VMSize = "vm-size"; +exports.ManagedIdentity = "managed-identity"; +exports.SourceImageType = "source-image-type"; +exports.SourceOSType = "source-os-type"; +exports.SourceImage = "source-image"; +exports.platformImageSourceTypeImage = "platformimage"; +exports.marketPlaceSourceTypeImage = "marketplace"; +exports.managedImageSourceTypeImage = "managedimage"; +exports.sharedImageGallerySourceTypeImage = "SharedImageGallery"; +exports.CustomizerSource = "customizer-source"; +exports.InlineScript = "inlineScript"; +exports.WindowsUpdateProvisioner = "windowsUpdateProvisioner"; +exports.customizerWindowsUpdate = "customizer-windows-update"; +exports.customizerScript = "customizer-script"; +exports.customizerDestination = "customizer-destination"; +exports.DistributeType = "dist-type"; +exports.DistResourceId = "dist-resource-id"; +exports.DistLocation = "dist-location"; +exports.RunOutputName = "run-output-name"; +exports.DistImageTags = "dist-image-tags"; 
+exports.storageAccountName = "strgacc"; +exports.containerName = "imagebuilder-aib-action"; +exports.imageTemplateName = "imagebuilderTemplate_"; + + +/***/ }), + +/***/ 34853: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +var __importDefault = (this && this.__importDefault) || function (mod) { + return (mod && mod.__esModule) ? 
mod : { "default": mod }; +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const ImageBuilder_1 = __importDefault(__nccwpck_require__(44871)); +const AuthorizerFactory_1 = __nccwpck_require__(52825); +const core = __importStar(__nccwpck_require__(87454)); +function main() { + return __awaiter(this, void 0, void 0, function* () { + let azureResourceAuthorizer = yield AuthorizerFactory_1.AuthorizerFactory.getAuthorizer(); + var ib = new ImageBuilder_1.default(azureResourceAuthorizer); + yield ib.execute(); + }); +} +main().then() + .catch((error) => { + console.log("$(imagebuilder-run-status) = ", "failed"); + core.setOutput('imagebuilder-run-status', "failed"); + core.error(error); + core.setFailed("Action run failed."); +}); + + +/***/ }), + +/***/ 53290: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? 
(function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.issue = exports.issueCommand = void 0; +const os = __importStar(__nccwpck_require__(22037)); +const utils_1 = __nccwpck_require__(50421); +/** + * Commands + * + * Command Format: + * ::name key=value,key=value::message + * + * Examples: + * ::warning::This is the message + * ::set-env name=MY_VAR::some value + */ +function issueCommand(command, properties, message) { + const cmd = new Command(command, properties, message); + process.stdout.write(cmd.toString() + os.EOL); +} +exports.issueCommand = issueCommand; +function issue(name, message = '') { + issueCommand(name, {}, message); +} +exports.issue = issue; +const CMD_STRING = '::'; +class Command { + constructor(command, properties, message) { + if (!command) { + command = 'missing.command'; + } + this.command = command; + this.properties = properties; + this.message = message; + } + toString() { + let cmdStr = CMD_STRING + this.command; + if (this.properties && Object.keys(this.properties).length > 0) { + cmdStr += ' '; + let first = true; + for (const key in this.properties) { + if (this.properties.hasOwnProperty(key)) { + const val = this.properties[key]; + if (val) { + if (first) { + first = false; + } + else { + cmdStr += ','; + } + cmdStr += `${key}=${escapeProperty(val)}`; + } + } + } + } + cmdStr += `${CMD_STRING}${escapeData(this.message)}`; + return cmdStr; + } +} +function escapeData(s) { + return utils_1.toCommandValue(s) + .replace(/%/g, '%25') + .replace(/\r/g, '%0D') + 
.replace(/\n/g, '%0A'); +} +function escapeProperty(s) { + return utils_1.toCommandValue(s) + .replace(/%/g, '%25') + .replace(/\r/g, '%0D') + .replace(/\n/g, '%0A') + .replace(/:/g, '%3A') + .replace(/,/g, '%2C'); +} +//# sourceMappingURL=command.js.map + +/***/ }), + +/***/ 87454: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.getIDToken = exports.getState = exports.saveState = exports.group = exports.endGroup = exports.startGroup = exports.info = exports.notice = exports.warning = exports.error = exports.debug = exports.isDebug = exports.setFailed = exports.setCommandEcho = exports.setOutput = exports.getBooleanInput = exports.getMultilineInput = exports.getInput = exports.addPath = exports.setSecret = exports.exportVariable = exports.ExitCode = void 0; +const command_1 = __nccwpck_require__(53290); +const file_command_1 = __nccwpck_require__(76394); +const utils_1 = __nccwpck_require__(50421); +const os = __importStar(__nccwpck_require__(22037)); +const path = __importStar(__nccwpck_require__(71017)); +const oidc_utils_1 = __nccwpck_require__(91130); +/** + * The code to exit an action + */ +var ExitCode; +(function (ExitCode) { + /** + * A code indicating that the action was successful + */ + ExitCode[ExitCode["Success"] = 0] = "Success"; + /** + * A code indicating that the action was a failure + */ + ExitCode[ExitCode["Failure"] = 1] = "Failure"; +})(ExitCode = exports.ExitCode || (exports.ExitCode = {})); +//----------------------------------------------------------------------- +// Variables +//----------------------------------------------------------------------- +/** + * Sets env variable for this action and future actions in the job + * @param name the name of the variable to set + * @param val the value of the variable. 
Non-string values will be converted to a string via JSON.stringify + */ +// eslint-disable-next-line @typescript-eslint/no-explicit-any +function exportVariable(name, val) { + const convertedVal = utils_1.toCommandValue(val); + process.env[name] = convertedVal; + const filePath = process.env['GITHUB_ENV'] || ''; + if (filePath) { + const delimiter = '_GitHubActionsFileCommandDelimeter_'; + const commandValue = `${name}<<${delimiter}${os.EOL}${convertedVal}${os.EOL}${delimiter}`; + file_command_1.issueCommand('ENV', commandValue); + } + else { + command_1.issueCommand('set-env', { name }, convertedVal); + } +} +exports.exportVariable = exportVariable; +/** + * Registers a secret which will get masked from logs + * @param secret value of the secret + */ +function setSecret(secret) { + command_1.issueCommand('add-mask', {}, secret); +} +exports.setSecret = setSecret; +/** + * Prepends inputPath to the PATH (for this action and future actions) + * @param inputPath + */ +function addPath(inputPath) { + const filePath = process.env['GITHUB_PATH'] || ''; + if (filePath) { + file_command_1.issueCommand('PATH', inputPath); + } + else { + command_1.issueCommand('add-path', {}, inputPath); + } + process.env['PATH'] = `${inputPath}${path.delimiter}${process.env['PATH']}`; +} +exports.addPath = addPath; +/** + * Gets the value of an input. + * Unless trimWhitespace is set to false in InputOptions, the value is also trimmed. + * Returns an empty string if the value is not defined. + * + * @param name name of the input to get + * @param options optional. See InputOptions. 
+ * @returns string + */ +function getInput(name, options) { + const val = process.env[`INPUT_${name.replace(/ /g, '_').toUpperCase()}`] || ''; + if (options && options.required && !val) { + throw new Error(`Input required and not supplied: ${name}`); + } + if (options && options.trimWhitespace === false) { + return val; + } + return val.trim(); +} +exports.getInput = getInput; +/** + * Gets the values of an multiline input. Each value is also trimmed. + * + * @param name name of the input to get + * @param options optional. See InputOptions. + * @returns string[] + * + */ +function getMultilineInput(name, options) { + const inputs = getInput(name, options) + .split('\n') + .filter(x => x !== ''); + return inputs; +} +exports.getMultilineInput = getMultilineInput; +/** + * Gets the input value of the boolean type in the YAML 1.2 "core schema" specification. + * Support boolean input list: `true | True | TRUE | false | False | FALSE` . + * The return value is also in boolean type. + * ref: https://yaml.org/spec/1.2/spec.html#id2804923 + * + * @param name name of the input to get + * @param options optional. See InputOptions. + * @returns boolean + */ +function getBooleanInput(name, options) { + const trueValue = ['true', 'True', 'TRUE']; + const falseValue = ['false', 'False', 'FALSE']; + const val = getInput(name, options); + if (trueValue.includes(val)) + return true; + if (falseValue.includes(val)) + return false; + throw new TypeError(`Input does not meet YAML 1.2 "Core Schema" specification: ${name}\n` + + `Support boolean input list: \`true | True | TRUE | false | False | FALSE\``); +} +exports.getBooleanInput = getBooleanInput; +/** + * Sets the value of an output. + * + * @param name name of the output to set + * @param value value to store. 
Non-string values will be converted to a string via JSON.stringify + */ +// eslint-disable-next-line @typescript-eslint/no-explicit-any +function setOutput(name, value) { + process.stdout.write(os.EOL); + command_1.issueCommand('set-output', { name }, value); +} +exports.setOutput = setOutput; +/** + * Enables or disables the echoing of commands into stdout for the rest of the step. + * Echoing is disabled by default if ACTIONS_STEP_DEBUG is not set. + * + */ +function setCommandEcho(enabled) { + command_1.issue('echo', enabled ? 'on' : 'off'); +} +exports.setCommandEcho = setCommandEcho; +//----------------------------------------------------------------------- +// Results +//----------------------------------------------------------------------- +/** + * Sets the action status to failed. + * When the action exits it will be with an exit code of 1 + * @param message add error issue message + */ +function setFailed(message) { + process.exitCode = ExitCode.Failure; + error(message); +} +exports.setFailed = setFailed; +//----------------------------------------------------------------------- +// Logging Commands +//----------------------------------------------------------------------- +/** + * Gets whether Actions Step Debug is on or not + */ +function isDebug() { + return process.env['RUNNER_DEBUG'] === '1'; +} +exports.isDebug = isDebug; +/** + * Writes debug message to user log + * @param message debug message + */ +function debug(message) { + command_1.issueCommand('debug', {}, message); +} +exports.debug = debug; +/** + * Adds an error issue + * @param message error issue message. Errors will be converted to string via toString() + * @param properties optional properties to add to the annotation. + */ +function error(message, properties = {}) { + command_1.issueCommand('error', utils_1.toCommandProperties(properties), message instanceof Error ? 
message.toString() : message); +} +exports.error = error; +/** + * Adds a warning issue + * @param message warning issue message. Errors will be converted to string via toString() + * @param properties optional properties to add to the annotation. + */ +function warning(message, properties = {}) { + command_1.issueCommand('warning', utils_1.toCommandProperties(properties), message instanceof Error ? message.toString() : message); +} +exports.warning = warning; +/** + * Adds a notice issue + * @param message notice issue message. Errors will be converted to string via toString() + * @param properties optional properties to add to the annotation. + */ +function notice(message, properties = {}) { + command_1.issueCommand('notice', utils_1.toCommandProperties(properties), message instanceof Error ? message.toString() : message); +} +exports.notice = notice; +/** + * Writes info to log with console.log. + * @param message info message + */ +function info(message) { + process.stdout.write(message + os.EOL); +} +exports.info = info; +/** + * Begin an output group. + * + * Output until the next `groupEnd` will be foldable in this group + * + * @param name The name of the output group + */ +function startGroup(name) { + command_1.issue('group', name); +} +exports.startGroup = startGroup; +/** + * End an output group. + */ +function endGroup() { + command_1.issue('endgroup'); +} +exports.endGroup = endGroup; +/** + * Wrap an asynchronous function call in a group. + * + * Returns the same type as the function itself. 
+ * + * @param name The name of the group + * @param fn The function to wrap in the group + */ +function group(name, fn) { + return __awaiter(this, void 0, void 0, function* () { + startGroup(name); + let result; + try { + result = yield fn(); + } + finally { + endGroup(); + } + return result; + }); +} +exports.group = group; +//----------------------------------------------------------------------- +// Wrapper action state +//----------------------------------------------------------------------- +/** + * Saves state for current action, the state can only be retrieved by this action's post job execution. + * + * @param name name of the state to store + * @param value value to store. Non-string values will be converted to a string via JSON.stringify + */ +// eslint-disable-next-line @typescript-eslint/no-explicit-any +function saveState(name, value) { + command_1.issueCommand('save-state', { name }, value); +} +exports.saveState = saveState; +/** + * Gets the value of an state set by this action's main execution. + * + * @param name name of the state to get + * @returns string + */ +function getState(name) { + return process.env[`STATE_${name}`] || ''; +} +exports.getState = getState; +function getIDToken(aud) { + return __awaiter(this, void 0, void 0, function* () { + return yield oidc_utils_1.OidcClient.getIDToken(aud); + }); +} +exports.getIDToken = getIDToken; +//# sourceMappingURL=core.js.map + +/***/ }), + +/***/ 76394: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +// For internal use, subject to change. +var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } }); +}) : (function(o, m, k, k2) { + if (k2 === undefined) k2 = k; + o[k2] = m[k]; +})); +var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? 
(function(o, v) { + Object.defineProperty(o, "default", { enumerable: true, value: v }); +}) : function(o, v) { + o["default"] = v; +}); +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (k !== "default" && Object.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k); + __setModuleDefault(result, mod); + return result; +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.issueCommand = void 0; +// We use any as a valid input type +/* eslint-disable @typescript-eslint/no-explicit-any */ +const fs = __importStar(__nccwpck_require__(57147)); +const os = __importStar(__nccwpck_require__(22037)); +const utils_1 = __nccwpck_require__(50421); +function issueCommand(command, message) { + const filePath = process.env[`GITHUB_${command}`]; + if (!filePath) { + throw new Error(`Unable to find environment variable for file command ${command}`); + } + if (!fs.existsSync(filePath)) { + throw new Error(`Missing file at path: ${filePath}`); + } + fs.appendFileSync(filePath, `${utils_1.toCommandValue(message)}${os.EOL}`, { + encoding: 'utf8' + }); +} +exports.issueCommand = issueCommand; +//# sourceMappingURL=file-command.js.map + +/***/ }), + +/***/ 91130: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.OidcClient = void 0; +const http_client_1 = __nccwpck_require__(56819); +const auth_1 = __nccwpck_require__(1388); +const core_1 = __nccwpck_require__(87454); +class OidcClient { + static createHttpClient(allowRetry = true, maxRetry = 10) { + const requestOptions = { + allowRetries: allowRetry, + maxRetries: maxRetry + }; + return new http_client_1.HttpClient('actions/oidc-client', [new auth_1.BearerCredentialHandler(OidcClient.getRequestToken())], requestOptions); + } + static getRequestToken() { + const token = process.env['ACTIONS_ID_TOKEN_REQUEST_TOKEN']; + if (!token) { + throw new Error('Unable to get ACTIONS_ID_TOKEN_REQUEST_TOKEN env variable'); + } + return token; + } + static getIDTokenUrl() { + const runtimeUrl = process.env['ACTIONS_ID_TOKEN_REQUEST_URL']; + if (!runtimeUrl) { + throw new Error('Unable to get ACTIONS_ID_TOKEN_REQUEST_URL env variable'); + } + return runtimeUrl; + } + static getCall(id_token_url) { + var _a; + return __awaiter(this, void 0, void 0, function* () { + const httpclient = OidcClient.createHttpClient(); + const res = yield httpclient + .getJson(id_token_url) + .catch(error => { + throw new Error(`Failed to get ID Token. \n + Error Code : ${error.statusCode}\n + Error Message: ${error.result.message}`); + }); + const id_token = (_a = res.result) === null || _a === void 0 ? 
void 0 : _a.value; + if (!id_token) { + throw new Error('Response json body do not have ID Token field'); + } + return id_token; + }); + } + static getIDToken(audience) { + return __awaiter(this, void 0, void 0, function* () { + try { + // New ID Token is requested from action service + let id_token_url = OidcClient.getIDTokenUrl(); + if (audience) { + const encodedAudience = encodeURIComponent(audience); + id_token_url = `${id_token_url}&audience=${encodedAudience}`; + } + core_1.debug(`ID token url is ${id_token_url}`); + const id_token = yield OidcClient.getCall(id_token_url); + core_1.setSecret(id_token); + return id_token; + } + catch (error) { + throw new Error(`Error message: ${error.message}`); + } + }); + } +} +exports.OidcClient = OidcClient; +//# sourceMappingURL=oidc-utils.js.map + +/***/ }), + +/***/ 50421: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +// We use any as a valid input type +/* eslint-disable @typescript-eslint/no-explicit-any */ +Object.defineProperty(exports, "__esModule", ({ value: true })); +exports.toCommandProperties = exports.toCommandValue = void 0; +/** + * Sanitizes an input into a string so it can be passed into issueCommand safely + * @param input input to sanitize into a string + */ +function toCommandValue(input) { + if (input === null || input === undefined) { + return ''; + } + else if (typeof input === 'string' || input instanceof String) { + return input; + } + return JSON.stringify(input); +} +exports.toCommandValue = toCommandValue; +/** + * + * @param annotationProperties + * @returns The command properties to send with the actual annotation command + * See IssueCommandProperties: https://github.com/actions/runner/blob/main/src/Runner.Worker/ActionCommandManager.cs#L646 + */ +function toCommandProperties(annotationProperties) { + if (!Object.keys(annotationProperties).length) { + return {}; + } + return { + title: annotationProperties.title, + file: annotationProperties.file, + line: 
annotationProperties.startLine, + endLine: annotationProperties.endLine, + col: annotationProperties.startColumn, + endColumn: annotationProperties.endColumn + }; +} +exports.toCommandProperties = toCommandProperties; +//# sourceMappingURL=utils.js.map + +/***/ }), + +/***/ 33683: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k]; + result["default"] = mod; + return result; +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const tr = __importStar(__nccwpck_require__(16779)); +/** + * Exec a command. + * Output will be streamed to the live console. + * Returns promise with return code + * + * @param commandLine command to execute (can include additional args). Must be correctly escaped. + * @param args optional arguments for tool. Escaping is handled by the lib. + * @param options optional exec options. 
See ExecOptions + * @returns Promise exit code + */ +function exec(commandLine, args, options) { + return __awaiter(this, void 0, void 0, function* () { + const commandArgs = tr.argStringToArray(commandLine); + if (commandArgs.length === 0) { + throw new Error(`Parameter 'commandLine' cannot be null or empty.`); + } + // Path to tool to execute should be first arg + const toolPath = commandArgs[0]; + args = commandArgs.slice(1).concat(args || []); + const runner = new tr.ToolRunner(toolPath, args, options); + return runner.exec(); + }); +} +exports.exec = exec; +//# sourceMappingURL=exec.js.map + +/***/ }), + +/***/ 16779: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +var __importStar = (this && this.__importStar) || function (mod) { + if (mod && mod.__esModule) return mod; + var result = {}; + if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k]; + result["default"] = mod; + return result; +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const os = __importStar(__nccwpck_require__(22037)); +const events = __importStar(__nccwpck_require__(82361)); +const child = __importStar(__nccwpck_require__(32081)); +const path = __importStar(__nccwpck_require__(71017)); +const io = __importStar(__nccwpck_require__(27692)); +const ioUtil = __importStar(__nccwpck_require__(34889)); +/* eslint-disable @typescript-eslint/unbound-method */ +const IS_WINDOWS = process.platform === 'win32'; +/* + * Class for running command line tools. Handles quoting and arg parsing in a platform agnostic way. + */ +class ToolRunner extends events.EventEmitter { + constructor(toolPath, args, options) { + super(); + if (!toolPath) { + throw new Error("Parameter 'toolPath' cannot be null or empty."); + } + this.toolPath = toolPath; + this.args = args || []; + this.options = options || {}; + } + _debug(message) { + if (this.options.listeners && this.options.listeners.debug) { + this.options.listeners.debug(message); + } + } + _getCommandString(options, noPrefix) { + const toolPath = this._getSpawnFileName(); + const args = this._getSpawnArgs(options); + let cmd = noPrefix ? 
'' : '[command]'; // omit prefix when piped to a second tool + if (IS_WINDOWS) { + // Windows + cmd file + if (this._isCmdFile()) { + cmd += toolPath; + for (const a of args) { + cmd += ` ${a}`; + } + } + // Windows + verbatim + else if (options.windowsVerbatimArguments) { + cmd += `"${toolPath}"`; + for (const a of args) { + cmd += ` ${a}`; + } + } + // Windows (regular) + else { + cmd += this._windowsQuoteCmdArg(toolPath); + for (const a of args) { + cmd += ` ${this._windowsQuoteCmdArg(a)}`; + } + } + } + else { + // OSX/Linux - this can likely be improved with some form of quoting. + // creating processes on Unix is fundamentally different than Windows. + // on Unix, execvp() takes an arg array. + cmd += toolPath; + for (const a of args) { + cmd += ` ${a}`; + } + } + return cmd; + } + _processLineBuffer(data, strBuffer, onLine) { + try { + let s = strBuffer + data.toString(); + let n = s.indexOf(os.EOL); + while (n > -1) { + const line = s.substring(0, n); + onLine(line); + // the rest of the string ... + s = s.substring(n + os.EOL.length); + n = s.indexOf(os.EOL); + } + strBuffer = s; + } + catch (err) { + // streaming lines to console is best effort. Don't fail a build. + this._debug(`error processing line. Failed with error ${err}`); + } + } + _getSpawnFileName() { + if (IS_WINDOWS) { + if (this._isCmdFile()) { + return process.env['COMSPEC'] || 'cmd.exe'; + } + } + return this.toolPath; + } + _getSpawnArgs(options) { + if (IS_WINDOWS) { + if (this._isCmdFile()) { + let argline = `/D /S /C "${this._windowsQuoteCmdArg(this.toolPath)}`; + for (const a of this.args) { + argline += ' '; + argline += options.windowsVerbatimArguments + ? 
a + : this._windowsQuoteCmdArg(a); + } + argline += '"'; + return [argline]; + } + } + return this.args; + } + _endsWith(str, end) { + return str.endsWith(end); + } + _isCmdFile() { + const upperToolPath = this.toolPath.toUpperCase(); + return (this._endsWith(upperToolPath, '.CMD') || + this._endsWith(upperToolPath, '.BAT')); + } + _windowsQuoteCmdArg(arg) { + // for .exe, apply the normal quoting rules that libuv applies + if (!this._isCmdFile()) { + return this._uvQuoteCmdArg(arg); + } + // otherwise apply quoting rules specific to the cmd.exe command line parser. + // the libuv rules are generic and are not designed specifically for cmd.exe + // command line parser. + // + // for a detailed description of the cmd.exe command line parser, refer to + // http://stackoverflow.com/questions/4094699/how-does-the-windows-command-interpreter-cmd-exe-parse-scripts/7970912#7970912 + // need quotes for empty arg + if (!arg) { + return '""'; + } + // determine whether the arg needs to be quoted + const cmdSpecialChars = [ + ' ', + '\t', + '&', + '(', + ')', + '[', + ']', + '{', + '}', + '^', + '=', + ';', + '!', + "'", + '+', + ',', + '`', + '~', + '|', + '<', + '>', + '"' + ]; + let needsQuotes = false; + for (const char of arg) { + if (cmdSpecialChars.some(x => x === char)) { + needsQuotes = true; + break; + } + } + // short-circuit if quotes not needed + if (!needsQuotes) { + return arg; + } + // the following quoting rules are very similar to the rules that by libuv applies. + // + // 1) wrap the string in quotes + // + // 2) double-up quotes - i.e. " => "" + // + // this is different from the libuv quoting rules. libuv replaces " with \", which unfortunately + // doesn't work well with a cmd.exe command line. + // + // note, replacing " with "" also works well if the arg is passed to a downstream .NET console app. 
+ // for example, the command line: + // foo.exe "myarg:""my val""" + // is parsed by a .NET console app into an arg array: + // [ "myarg:\"my val\"" ] + // which is the same end result when applying libuv quoting rules. although the actual + // command line from libuv quoting rules would look like: + // foo.exe "myarg:\"my val\"" + // + // 3) double-up slashes that precede a quote, + // e.g. hello \world => "hello \world" + // hello\"world => "hello\\""world" + // hello\\"world => "hello\\\\""world" + // hello world\ => "hello world\\" + // + // technically this is not required for a cmd.exe command line, or the batch argument parser. + // the reasons for including this as a .cmd quoting rule are: + // + // a) this is optimized for the scenario where the argument is passed from the .cmd file to an + // external program. many programs (e.g. .NET console apps) rely on the slash-doubling rule. + // + // b) it's what we've been doing previously (by deferring to node default behavior) and we + // haven't heard any complaints about that aspect. + // + // note, a weakness of the quoting rules chosen here, is that % is not escaped. in fact, % cannot be + // escaped when used on the command line directly - even though within a .cmd file % can be escaped + // by using %%. + // + // the saving grace is, on the command line, %var% is left as-is if var is not defined. this contrasts + // the line parsing rules within a .cmd file, where if var is not defined it is replaced with nothing. + // + // one option that was explored was replacing % with ^% - i.e. %var% => ^%var^%. this hack would + // often work, since it is unlikely that var^ would exist, and the ^ character is removed when the + // variable is used. the problem, however, is that ^ is not removed when %* is used to pass the args + // to an external program. + // + // an unexplored potential solution for the % escaping problem, is to create a wrapper .cmd file. + // % can be escaped within a .cmd file. 
+ let reverse = '"'; + let quoteHit = true; + for (let i = arg.length; i > 0; i--) { + // walk the string in reverse + reverse += arg[i - 1]; + if (quoteHit && arg[i - 1] === '\\') { + reverse += '\\'; // double the slash + } + else if (arg[i - 1] === '"') { + quoteHit = true; + reverse += '"'; // double the quote + } + else { + quoteHit = false; + } + } + reverse += '"'; + return reverse + .split('') + .reverse() + .join(''); + } + _uvQuoteCmdArg(arg) { + // Tool runner wraps child_process.spawn() and needs to apply the same quoting as + // Node in certain cases where the undocumented spawn option windowsVerbatimArguments + // is used. + // + // Since this function is a port of quote_cmd_arg from Node 4.x (technically, lib UV, + // see https://github.com/nodejs/node/blob/v4.x/deps/uv/src/win/process.c for details), + // pasting copyright notice from Node within this function: + // + // Copyright Joyent, Inc. and other Node contributors. All rights reserved. + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to + // deal in the Software without restriction, including without limitation the + // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + // sell copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in + // all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + // IN THE SOFTWARE. + if (!arg) { + // Need double quotation for empty argument + return '""'; + } + if (!arg.includes(' ') && !arg.includes('\t') && !arg.includes('"')) { + // No quotation needed + return arg; + } + if (!arg.includes('"') && !arg.includes('\\')) { + // No embedded double quotes or backslashes, so I can just wrap + // quote marks around the whole thing. + return `"${arg}"`; + } + // Expected input/output: + // input : hello"world + // output: "hello\"world" + // input : hello""world + // output: "hello\"\"world" + // input : hello\world + // output: hello\world + // input : hello\\world + // output: hello\\world + // input : hello\"world + // output: "hello\\\"world" + // input : hello\\"world + // output: "hello\\\\\"world" + // input : hello world\ + // output: "hello world\\" - note the comment in libuv actually reads "hello world\" + // but it appears the comment is wrong, it should be "hello world\\" + let reverse = '"'; + let quoteHit = true; + for (let i = arg.length; i > 0; i--) { + // walk the string in reverse + reverse += arg[i - 1]; + if (quoteHit && arg[i - 1] === '\\') { + reverse += '\\'; + } + else if (arg[i - 1] === '"') { + quoteHit = true; + reverse += '\\'; + } + else { + quoteHit = false; + } + } + reverse += '"'; + return reverse + .split('') + .reverse() + .join(''); + } + _cloneExecOptions(options) { + options = options || {}; + const result = { + cwd: options.cwd || process.cwd(), + env: options.env || process.env, + silent: options.silent || false, + windowsVerbatimArguments: options.windowsVerbatimArguments || false, + failOnStdErr: options.failOnStdErr || false, + ignoreReturnCode: options.ignoreReturnCode || false, + delay: options.delay || 10000 + }; + 
result.outStream = options.outStream || process.stdout; + result.errStream = options.errStream || process.stderr; + return result; + } + _getSpawnOptions(options, toolPath) { + options = options || {}; + const result = {}; + result.cwd = options.cwd; + result.env = options.env; + result['windowsVerbatimArguments'] = + options.windowsVerbatimArguments || this._isCmdFile(); + if (options.windowsVerbatimArguments) { + result.argv0 = `"${toolPath}"`; + } + return result; + } + /** + * Exec a tool. + * Output will be streamed to the live console. + * Returns promise with return code + * + * @param tool path to tool to exec + * @param options optional exec options. See ExecOptions + * @returns number + */ + exec() { + return __awaiter(this, void 0, void 0, function* () { + // root the tool path if it is unrooted and contains relative pathing + if (!ioUtil.isRooted(this.toolPath) && + (this.toolPath.includes('/') || + (IS_WINDOWS && this.toolPath.includes('\\')))) { + // prefer options.cwd if it is specified, however options.cwd may also need to be rooted + this.toolPath = path.resolve(process.cwd(), this.options.cwd || process.cwd(), this.toolPath); + } + // if the tool is only a file name, then resolve it from the PATH + // otherwise verify it exists (add extension on Windows if necessary) + this.toolPath = yield io.which(this.toolPath, true); + return new Promise((resolve, reject) => { + this._debug(`exec tool: ${this.toolPath}`); + this._debug('arguments:'); + for (const arg of this.args) { + this._debug(` ${arg}`); + } + const optionsNonNull = this._cloneExecOptions(this.options); + if (!optionsNonNull.silent && optionsNonNull.outStream) { + optionsNonNull.outStream.write(this._getCommandString(optionsNonNull) + os.EOL); + } + const state = new ExecState(optionsNonNull, this.toolPath); + state.on('debug', (message) => { + this._debug(message); + }); + const fileName = this._getSpawnFileName(); + const cp = child.spawn(fileName, this._getSpawnArgs(optionsNonNull), 
this._getSpawnOptions(this.options, fileName)); + const stdbuffer = ''; + if (cp.stdout) { + cp.stdout.on('data', (data) => { + if (this.options.listeners && this.options.listeners.stdout) { + this.options.listeners.stdout(data); + } + if (!optionsNonNull.silent && optionsNonNull.outStream) { + optionsNonNull.outStream.write(data); + } + this._processLineBuffer(data, stdbuffer, (line) => { + if (this.options.listeners && this.options.listeners.stdline) { + this.options.listeners.stdline(line); + } + }); + }); + } + const errbuffer = ''; + if (cp.stderr) { + cp.stderr.on('data', (data) => { + state.processStderr = true; + if (this.options.listeners && this.options.listeners.stderr) { + this.options.listeners.stderr(data); + } + if (!optionsNonNull.silent && + optionsNonNull.errStream && + optionsNonNull.outStream) { + const s = optionsNonNull.failOnStdErr + ? optionsNonNull.errStream + : optionsNonNull.outStream; + s.write(data); + } + this._processLineBuffer(data, errbuffer, (line) => { + if (this.options.listeners && this.options.listeners.errline) { + this.options.listeners.errline(line); + } + }); + }); + } + cp.on('error', (err) => { + state.processError = err.message; + state.processExited = true; + state.processClosed = true; + state.CheckComplete(); + }); + cp.on('exit', (code) => { + state.processExitCode = code; + state.processExited = true; + this._debug(`Exit code ${code} received from tool '${this.toolPath}'`); + state.CheckComplete(); + }); + cp.on('close', (code) => { + state.processExitCode = code; + state.processExited = true; + state.processClosed = true; + this._debug(`STDIO streams have closed for tool '${this.toolPath}'`); + state.CheckComplete(); + }); + state.on('done', (error, exitCode) => { + if (stdbuffer.length > 0) { + this.emit('stdline', stdbuffer); + } + if (errbuffer.length > 0) { + this.emit('errline', errbuffer); + } + cp.removeAllListeners(); + if (error) { + reject(error); + } + else { + resolve(exitCode); + } + }); + if 
(this.options.input) { + if (!cp.stdin) { + throw new Error('child process missing stdin'); + } + cp.stdin.end(this.options.input); + } + }); + }); + } +} +exports.ToolRunner = ToolRunner; +/** + * Convert an arg string to an array of args. Handles escaping + * + * @param argString string of arguments + * @returns string[] array of arguments + */ +function argStringToArray(argString) { + const args = []; + let inQuotes = false; + let escaped = false; + let arg = ''; + function append(c) { + // we only escape double quotes. + if (escaped && c !== '"') { + arg += '\\'; + } + arg += c; + escaped = false; + } + for (let i = 0; i < argString.length; i++) { + const c = argString.charAt(i); + if (c === '"') { + if (!escaped) { + inQuotes = !inQuotes; + } + else { + append(c); + } + continue; + } + if (c === '\\' && escaped) { + append(c); + continue; + } + if (c === '\\' && inQuotes) { + escaped = true; + continue; + } + if (c === ' ' && !inQuotes) { + if (arg.length > 0) { + args.push(arg); + arg = ''; + } + continue; + } + append(c); + } + if (arg.length > 0) { + args.push(arg.trim()); + } + return args; +} +exports.argStringToArray = argStringToArray; +class ExecState extends events.EventEmitter { + constructor(options, toolPath) { + super(); + this.processClosed = false; // tracks whether the process has exited and stdio is closed + this.processError = ''; + this.processExitCode = 0; + this.processExited = false; // tracks whether the process has exited + this.processStderr = false; // tracks whether stderr was written to + this.delay = 10000; // 10 seconds + this.done = false; + this.timeout = null; + if (!toolPath) { + throw new Error('toolPath must not be empty'); + } + this.options = options; + this.toolPath = toolPath; + if (options.delay) { + this.delay = options.delay; + } + } + CheckComplete() { + if (this.done) { + return; + } + if (this.processClosed) { + this._setResult(); + } + else if (this.processExited) { + this.timeout = 
setTimeout(ExecState.HandleTimeout, this.delay, this); + } + } + _debug(message) { + this.emit('debug', message); + } + _setResult() { + // determine whether there is an error + let error; + if (this.processExited) { + if (this.processError) { + error = new Error(`There was an error when attempting to execute the process '${this.toolPath}'. This may indicate the process failed to start. Error: ${this.processError}`); + } + else if (this.processExitCode !== 0 && !this.options.ignoreReturnCode) { + error = new Error(`The process '${this.toolPath}' failed with exit code ${this.processExitCode}`); + } + else if (this.processStderr && this.options.failOnStdErr) { + error = new Error(`The process '${this.toolPath}' failed because one or more lines were written to the STDERR stream`); + } + } + // clear the timeout + if (this.timeout) { + clearTimeout(this.timeout); + this.timeout = null; + } + this.done = true; + this.emit('done', error, this.processExitCode); + } + static HandleTimeout(state) { + if (state.done) { + return; + } + if (!state.processClosed && state.processExited) { + const message = `The STDIO streams did not close within ${state.delay / + 1000} seconds of the exit event from process '${state.toolPath}'. 
This may indicate a child process inherited the STDIO streams and has not yet exited.`; + state._debug(message); + } + state._setResult(); + } +} +//# sourceMappingURL=toolrunner.js.map + +/***/ }), + +/***/ 1388: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +class BasicCredentialHandler { + constructor(username, password) { + this.username = username; + this.password = password; + } + prepareRequest(options) { + options.headers['Authorization'] = + 'Basic ' + + Buffer.from(this.username + ':' + this.password).toString('base64'); + } + // This handler cannot handle 401 + canHandleAuthentication(response) { + return false; + } + handleAuthentication(httpClient, requestInfo, objs) { + return null; + } +} +exports.BasicCredentialHandler = BasicCredentialHandler; +class BearerCredentialHandler { + constructor(token) { + this.token = token; + } + // currently implements pre-authorization + // TODO: support preAuth = false where it hooks on 401 + prepareRequest(options) { + options.headers['Authorization'] = 'Bearer ' + this.token; + } + // This handler cannot handle 401 + canHandleAuthentication(response) { + return false; + } + handleAuthentication(httpClient, requestInfo, objs) { + return null; + } +} +exports.BearerCredentialHandler = BearerCredentialHandler; +class PersonalAccessTokenCredentialHandler { + constructor(token) { + this.token = token; + } + // currently implements pre-authorization + // TODO: support preAuth = false where it hooks on 401 + prepareRequest(options) { + options.headers['Authorization'] = + 'Basic ' + Buffer.from('PAT:' + this.token).toString('base64'); + } + // This handler cannot handle 401 + canHandleAuthentication(response) { + return false; + } + handleAuthentication(httpClient, requestInfo, objs) { + return null; + } +} +exports.PersonalAccessTokenCredentialHandler = PersonalAccessTokenCredentialHandler; + + +/***/ }), + +/***/ 56819: +/***/ 
((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +const http = __nccwpck_require__(13685); +const https = __nccwpck_require__(95687); +const pm = __nccwpck_require__(28606); +let tunnel; +var HttpCodes; +(function (HttpCodes) { + HttpCodes[HttpCodes["OK"] = 200] = "OK"; + HttpCodes[HttpCodes["MultipleChoices"] = 300] = "MultipleChoices"; + HttpCodes[HttpCodes["MovedPermanently"] = 301] = "MovedPermanently"; + HttpCodes[HttpCodes["ResourceMoved"] = 302] = "ResourceMoved"; + HttpCodes[HttpCodes["SeeOther"] = 303] = "SeeOther"; + HttpCodes[HttpCodes["NotModified"] = 304] = "NotModified"; + HttpCodes[HttpCodes["UseProxy"] = 305] = "UseProxy"; + HttpCodes[HttpCodes["SwitchProxy"] = 306] = "SwitchProxy"; + HttpCodes[HttpCodes["TemporaryRedirect"] = 307] = "TemporaryRedirect"; + HttpCodes[HttpCodes["PermanentRedirect"] = 308] = "PermanentRedirect"; + HttpCodes[HttpCodes["BadRequest"] = 400] = "BadRequest"; + HttpCodes[HttpCodes["Unauthorized"] = 401] = "Unauthorized"; + HttpCodes[HttpCodes["PaymentRequired"] = 402] = "PaymentRequired"; + HttpCodes[HttpCodes["Forbidden"] = 403] = "Forbidden"; + HttpCodes[HttpCodes["NotFound"] = 404] = "NotFound"; + HttpCodes[HttpCodes["MethodNotAllowed"] = 405] = "MethodNotAllowed"; + HttpCodes[HttpCodes["NotAcceptable"] = 406] = "NotAcceptable"; + HttpCodes[HttpCodes["ProxyAuthenticationRequired"] = 407] = "ProxyAuthenticationRequired"; + HttpCodes[HttpCodes["RequestTimeout"] = 408] = "RequestTimeout"; + HttpCodes[HttpCodes["Conflict"] = 409] = "Conflict"; + HttpCodes[HttpCodes["Gone"] = 410] = "Gone"; + HttpCodes[HttpCodes["TooManyRequests"] = 429] = "TooManyRequests"; + HttpCodes[HttpCodes["InternalServerError"] = 500] = "InternalServerError"; + HttpCodes[HttpCodes["NotImplemented"] = 501] = "NotImplemented"; + HttpCodes[HttpCodes["BadGateway"] = 502] = "BadGateway"; + HttpCodes[HttpCodes["ServiceUnavailable"] = 503] = "ServiceUnavailable"; 
+ HttpCodes[HttpCodes["GatewayTimeout"] = 504] = "GatewayTimeout"; +})(HttpCodes = exports.HttpCodes || (exports.HttpCodes = {})); +var Headers; +(function (Headers) { + Headers["Accept"] = "accept"; + Headers["ContentType"] = "content-type"; +})(Headers = exports.Headers || (exports.Headers = {})); +var MediaTypes; +(function (MediaTypes) { + MediaTypes["ApplicationJson"] = "application/json"; +})(MediaTypes = exports.MediaTypes || (exports.MediaTypes = {})); +/** + * Returns the proxy URL, depending upon the supplied url and proxy environment variables. + * @param serverUrl The server URL where the request will be sent. For example, https://api.github.com + */ +function getProxyUrl(serverUrl) { + let proxyUrl = pm.getProxyUrl(new URL(serverUrl)); + return proxyUrl ? proxyUrl.href : ''; +} +exports.getProxyUrl = getProxyUrl; +const HttpRedirectCodes = [ + HttpCodes.MovedPermanently, + HttpCodes.ResourceMoved, + HttpCodes.SeeOther, + HttpCodes.TemporaryRedirect, + HttpCodes.PermanentRedirect +]; +const HttpResponseRetryCodes = [ + HttpCodes.BadGateway, + HttpCodes.ServiceUnavailable, + HttpCodes.GatewayTimeout +]; +const RetryableHttpVerbs = ['OPTIONS', 'GET', 'DELETE', 'HEAD']; +const ExponentialBackoffCeiling = 10; +const ExponentialBackoffTimeSlice = 5; +class HttpClientError extends Error { + constructor(message, statusCode) { + super(message); + this.name = 'HttpClientError'; + this.statusCode = statusCode; + Object.setPrototypeOf(this, HttpClientError.prototype); + } +} +exports.HttpClientError = HttpClientError; +class HttpClientResponse { + constructor(message) { + this.message = message; + } + readBody() { + return new Promise(async (resolve, reject) => { + let output = Buffer.alloc(0); + this.message.on('data', (chunk) => { + output = Buffer.concat([output, chunk]); + }); + this.message.on('end', () => { + resolve(output.toString()); + }); + }); + } +} +exports.HttpClientResponse = HttpClientResponse; +function isHttps(requestUrl) { + let parsedUrl = new 
URL(requestUrl); + return parsedUrl.protocol === 'https:'; +} +exports.isHttps = isHttps; +class HttpClient { + constructor(userAgent, handlers, requestOptions) { + this._ignoreSslError = false; + this._allowRedirects = true; + this._allowRedirectDowngrade = false; + this._maxRedirects = 50; + this._allowRetries = false; + this._maxRetries = 1; + this._keepAlive = false; + this._disposed = false; + this.userAgent = userAgent; + this.handlers = handlers || []; + this.requestOptions = requestOptions; + if (requestOptions) { + if (requestOptions.ignoreSslError != null) { + this._ignoreSslError = requestOptions.ignoreSslError; + } + this._socketTimeout = requestOptions.socketTimeout; + if (requestOptions.allowRedirects != null) { + this._allowRedirects = requestOptions.allowRedirects; + } + if (requestOptions.allowRedirectDowngrade != null) { + this._allowRedirectDowngrade = requestOptions.allowRedirectDowngrade; + } + if (requestOptions.maxRedirects != null) { + this._maxRedirects = Math.max(requestOptions.maxRedirects, 0); + } + if (requestOptions.keepAlive != null) { + this._keepAlive = requestOptions.keepAlive; + } + if (requestOptions.allowRetries != null) { + this._allowRetries = requestOptions.allowRetries; + } + if (requestOptions.maxRetries != null) { + this._maxRetries = requestOptions.maxRetries; + } + } + } + options(requestUrl, additionalHeaders) { + return this.request('OPTIONS', requestUrl, null, additionalHeaders || {}); + } + get(requestUrl, additionalHeaders) { + return this.request('GET', requestUrl, null, additionalHeaders || {}); + } + del(requestUrl, additionalHeaders) { + return this.request('DELETE', requestUrl, null, additionalHeaders || {}); + } + post(requestUrl, data, additionalHeaders) { + return this.request('POST', requestUrl, data, additionalHeaders || {}); + } + patch(requestUrl, data, additionalHeaders) { + return this.request('PATCH', requestUrl, data, additionalHeaders || {}); + } + put(requestUrl, data, additionalHeaders) { + return 
this.request('PUT', requestUrl, data, additionalHeaders || {}); + } + head(requestUrl, additionalHeaders) { + return this.request('HEAD', requestUrl, null, additionalHeaders || {}); + } + sendStream(verb, requestUrl, stream, additionalHeaders) { + return this.request(verb, requestUrl, stream, additionalHeaders); + } + /** + * Gets a typed object from an endpoint + * Be aware that not found returns a null. Other errors (4xx, 5xx) reject the promise + */ + async getJson(requestUrl, additionalHeaders = {}) { + additionalHeaders[Headers.Accept] = this._getExistingOrDefaultHeader(additionalHeaders, Headers.Accept, MediaTypes.ApplicationJson); + let res = await this.get(requestUrl, additionalHeaders); + return this._processResponse(res, this.requestOptions); + } + async postJson(requestUrl, obj, additionalHeaders = {}) { + let data = JSON.stringify(obj, null, 2); + additionalHeaders[Headers.Accept] = this._getExistingOrDefaultHeader(additionalHeaders, Headers.Accept, MediaTypes.ApplicationJson); + additionalHeaders[Headers.ContentType] = this._getExistingOrDefaultHeader(additionalHeaders, Headers.ContentType, MediaTypes.ApplicationJson); + let res = await this.post(requestUrl, data, additionalHeaders); + return this._processResponse(res, this.requestOptions); + } + async putJson(requestUrl, obj, additionalHeaders = {}) { + let data = JSON.stringify(obj, null, 2); + additionalHeaders[Headers.Accept] = this._getExistingOrDefaultHeader(additionalHeaders, Headers.Accept, MediaTypes.ApplicationJson); + additionalHeaders[Headers.ContentType] = this._getExistingOrDefaultHeader(additionalHeaders, Headers.ContentType, MediaTypes.ApplicationJson); + let res = await this.put(requestUrl, data, additionalHeaders); + return this._processResponse(res, this.requestOptions); + } + async patchJson(requestUrl, obj, additionalHeaders = {}) { + let data = JSON.stringify(obj, null, 2); + additionalHeaders[Headers.Accept] = this._getExistingOrDefaultHeader(additionalHeaders, Headers.Accept, 
MediaTypes.ApplicationJson); + additionalHeaders[Headers.ContentType] = this._getExistingOrDefaultHeader(additionalHeaders, Headers.ContentType, MediaTypes.ApplicationJson); + let res = await this.patch(requestUrl, data, additionalHeaders); + return this._processResponse(res, this.requestOptions); + } + /** + * Makes a raw http request. + * All other methods such as get, post, patch, and request ultimately call this. + * Prefer get, del, post and patch + */ + async request(verb, requestUrl, data, headers) { + if (this._disposed) { + throw new Error('Client has already been disposed.'); + } + let parsedUrl = new URL(requestUrl); + let info = this._prepareRequest(verb, parsedUrl, headers); + // Only perform retries on reads since writes may not be idempotent. + let maxTries = this._allowRetries && RetryableHttpVerbs.indexOf(verb) != -1 + ? this._maxRetries + 1 + : 1; + let numTries = 0; + let response; + while (numTries < maxTries) { + response = await this.requestRaw(info, data); + // Check if it's an authentication challenge + if (response && + response.message && + response.message.statusCode === HttpCodes.Unauthorized) { + let authenticationHandler; + for (let i = 0; i < this.handlers.length; i++) { + if (this.handlers[i].canHandleAuthentication(response)) { + authenticationHandler = this.handlers[i]; + break; + } + } + if (authenticationHandler) { + return authenticationHandler.handleAuthentication(this, info, data); + } + else { + // We have received an unauthorized response but have no handlers to handle it. + // Let the response return to the caller. 
+ return response; + } + } + let redirectsRemaining = this._maxRedirects; + while (HttpRedirectCodes.indexOf(response.message.statusCode) != -1 && + this._allowRedirects && + redirectsRemaining > 0) { + const redirectUrl = response.message.headers['location']; + if (!redirectUrl) { + // if there's no location to redirect to, we won't + break; + } + let parsedRedirectUrl = new URL(redirectUrl); + if (parsedUrl.protocol == 'https:' && + parsedUrl.protocol != parsedRedirectUrl.protocol && + !this._allowRedirectDowngrade) { + throw new Error('Redirect from HTTPS to HTTP protocol. This downgrade is not allowed for security reasons. If you want to allow this behavior, set the allowRedirectDowngrade option to true.'); + } + // we need to finish reading the response before reassigning response + // which will leak the open socket. + await response.readBody(); + // strip authorization header if redirected to a different hostname + if (parsedRedirectUrl.hostname !== parsedUrl.hostname) { + for (let header in headers) { + // header names are case insensitive + if (header.toLowerCase() === 'authorization') { + delete headers[header]; + } + } + } + // let's make the request with the new redirectUrl + info = this._prepareRequest(verb, parsedRedirectUrl, headers); + response = await this.requestRaw(info, data); + redirectsRemaining--; + } + if (HttpResponseRetryCodes.indexOf(response.message.statusCode) == -1) { + // If not a retry code, return immediately instead of retrying + return response; + } + numTries += 1; + if (numTries < maxTries) { + await response.readBody(); + await this._performExponentialBackoff(numTries); + } + } + return response; + } + /** + * Needs to be called if keepAlive is set to true in request options. + */ + dispose() { + if (this._agent) { + this._agent.destroy(); + } + this._disposed = true; + } + /** + * Raw request. 
+ * @param info + * @param data + */ + requestRaw(info, data) { + return new Promise((resolve, reject) => { + let callbackForResult = function (err, res) { + if (err) { + reject(err); + } + resolve(res); + }; + this.requestRawWithCallback(info, data, callbackForResult); + }); + } + /** + * Raw request with callback. + * @param info + * @param data + * @param onResult + */ + requestRawWithCallback(info, data, onResult) { + let socket; + if (typeof data === 'string') { + info.options.headers['Content-Length'] = Buffer.byteLength(data, 'utf8'); + } + let callbackCalled = false; + let handleResult = (err, res) => { + if (!callbackCalled) { + callbackCalled = true; + onResult(err, res); + } + }; + let req = info.httpModule.request(info.options, (msg) => { + let res = new HttpClientResponse(msg); + handleResult(null, res); + }); + req.on('socket', sock => { + socket = sock; + }); + // If we ever get disconnected, we want the socket to timeout eventually + req.setTimeout(this._socketTimeout || 3 * 60000, () => { + if (socket) { + socket.end(); + } + handleResult(new Error('Request timeout: ' + info.options.path), null); + }); + req.on('error', function (err) { + // err has statusCode property + // res should have headers + handleResult(err, null); + }); + if (data && typeof data === 'string') { + req.write(data, 'utf8'); + } + if (data && typeof data !== 'string') { + data.on('close', function () { + req.end(); + }); + data.pipe(req); + } + else { + req.end(); + } + } + /** + * Gets an http agent. This function is useful when you need an http agent that handles + * routing through a proxy server - depending upon the url and proxy environment variables. + * @param serverUrl The server URL where the request will be sent. 
For example, https://api.github.com + */ + getAgent(serverUrl) { + let parsedUrl = new URL(serverUrl); + return this._getAgent(parsedUrl); + } + _prepareRequest(method, requestUrl, headers) { + const info = {}; + info.parsedUrl = requestUrl; + const usingSsl = info.parsedUrl.protocol === 'https:'; + info.httpModule = usingSsl ? https : http; + const defaultPort = usingSsl ? 443 : 80; + info.options = {}; + info.options.host = info.parsedUrl.hostname; + info.options.port = info.parsedUrl.port + ? parseInt(info.parsedUrl.port) + : defaultPort; + info.options.path = + (info.parsedUrl.pathname || '') + (info.parsedUrl.search || ''); + info.options.method = method; + info.options.headers = this._mergeHeaders(headers); + if (this.userAgent != null) { + info.options.headers['user-agent'] = this.userAgent; + } + info.options.agent = this._getAgent(info.parsedUrl); + // gives handlers an opportunity to participate + if (this.handlers) { + this.handlers.forEach(handler => { + handler.prepareRequest(info.options); + }); + } + return info; + } + _mergeHeaders(headers) { + const lowercaseKeys = obj => Object.keys(obj).reduce((c, k) => ((c[k.toLowerCase()] = obj[k]), c), {}); + if (this.requestOptions && this.requestOptions.headers) { + return Object.assign({}, lowercaseKeys(this.requestOptions.headers), lowercaseKeys(headers)); + } + return lowercaseKeys(headers || {}); + } + _getExistingOrDefaultHeader(additionalHeaders, header, _default) { + const lowercaseKeys = obj => Object.keys(obj).reduce((c, k) => ((c[k.toLowerCase()] = obj[k]), c), {}); + let clientHeader; + if (this.requestOptions && this.requestOptions.headers) { + clientHeader = lowercaseKeys(this.requestOptions.headers)[header]; + } + return additionalHeaders[header] || clientHeader || _default; + } + _getAgent(parsedUrl) { + let agent; + let proxyUrl = pm.getProxyUrl(parsedUrl); + let useProxy = proxyUrl && proxyUrl.hostname; + if (this._keepAlive && useProxy) { + agent = this._proxyAgent; + } + if 
(this._keepAlive && !useProxy) { + agent = this._agent; + } + // if agent is already assigned use that agent. + if (!!agent) { + return agent; + } + const usingSsl = parsedUrl.protocol === 'https:'; + let maxSockets = 100; + if (!!this.requestOptions) { + maxSockets = this.requestOptions.maxSockets || http.globalAgent.maxSockets; + } + if (useProxy) { + // If using proxy, need tunnel + if (!tunnel) { + tunnel = __nccwpck_require__(63221); + } + const agentOptions = { + maxSockets: maxSockets, + keepAlive: this._keepAlive, + proxy: { + ...((proxyUrl.username || proxyUrl.password) && { + proxyAuth: `${proxyUrl.username}:${proxyUrl.password}` + }), + host: proxyUrl.hostname, + port: proxyUrl.port + } + }; + let tunnelAgent; + const overHttps = proxyUrl.protocol === 'https:'; + if (usingSsl) { + tunnelAgent = overHttps ? tunnel.httpsOverHttps : tunnel.httpsOverHttp; + } + else { + tunnelAgent = overHttps ? tunnel.httpOverHttps : tunnel.httpOverHttp; + } + agent = tunnelAgent(agentOptions); + this._proxyAgent = agent; + } + // if reusing agent across request and tunneling agent isn't assigned create a new agent + if (this._keepAlive && !agent) { + const options = { keepAlive: this._keepAlive, maxSockets: maxSockets }; + agent = usingSsl ? new https.Agent(options) : new http.Agent(options); + this._agent = agent; + } + // if not using private agent and tunnel agent isn't setup then use global agent + if (!agent) { + agent = usingSsl ? 
https.globalAgent : http.globalAgent; + } + if (usingSsl && this._ignoreSslError) { + // we don't want to set NODE_TLS_REJECT_UNAUTHORIZED=0 since that will affect request for entire process + // http.RequestOptions doesn't expose a way to modify RequestOptions.agent.options + // we have to cast it to any and change it directly + agent.options = Object.assign(agent.options || {}, { + rejectUnauthorized: false + }); + } + return agent; + } + _performExponentialBackoff(retryNumber) { + retryNumber = Math.min(ExponentialBackoffCeiling, retryNumber); + const ms = ExponentialBackoffTimeSlice * Math.pow(2, retryNumber); + return new Promise(resolve => setTimeout(() => resolve(), ms)); + } + static dateTimeDeserializer(key, value) { + if (typeof value === 'string') { + let a = new Date(value); + if (!isNaN(a.valueOf())) { + return a; + } + } + return value; + } + async _processResponse(res, options) { + return new Promise(async (resolve, reject) => { + const statusCode = res.message.statusCode; + const response = { + statusCode: statusCode, + result: null, + headers: {} + }; + // not found leads to null obj returned + if (statusCode == HttpCodes.NotFound) { + resolve(response); + } + let obj; + let contents; + // get the result from the body + try { + contents = await res.readBody(); + if (contents && contents.length > 0) { + if (options && options.deserializeDates) { + obj = JSON.parse(contents, HttpClient.dateTimeDeserializer); + } + else { + obj = JSON.parse(contents); + } + response.result = obj; + } + response.headers = res.message.headers; + } + catch (err) { + // Invalid resource (contents not json); leaving result obj null + } + // note that 3xx redirects are handled by the http layer. 
+ if (statusCode > 299) { + let msg; + // if exception/error in body, attempt to get better error + if (obj && obj.message) { + msg = obj.message; + } + else if (contents && contents.length > 0) { + // it may be the case that the exception is in the body message as string + msg = contents; + } + else { + msg = 'Failed request: (' + statusCode + ')'; + } + let err = new HttpClientError(msg, statusCode); + err.result = response.result; + reject(err); + } + else { + resolve(response); + } + }); + } +} +exports.HttpClient = HttpClient; + + +/***/ }), + +/***/ 28606: +/***/ ((__unused_webpack_module, exports) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +function getProxyUrl(reqUrl) { + let usingSsl = reqUrl.protocol === 'https:'; + let proxyUrl; + if (checkBypass(reqUrl)) { + return proxyUrl; + } + let proxyVar; + if (usingSsl) { + proxyVar = process.env['https_proxy'] || process.env['HTTPS_PROXY']; + } + else { + proxyVar = process.env['http_proxy'] || process.env['HTTP_PROXY']; + } + if (proxyVar) { + proxyUrl = new URL(proxyVar); + } + return proxyUrl; +} +exports.getProxyUrl = getProxyUrl; +function checkBypass(reqUrl) { + if (!reqUrl.hostname) { + return false; + } + let noProxy = process.env['no_proxy'] || process.env['NO_PROXY'] || ''; + if (!noProxy) { + return false; + } + // Determine the request port + let reqPort; + if (reqUrl.port) { + reqPort = Number(reqUrl.port); + } + else if (reqUrl.protocol === 'http:') { + reqPort = 80; + } + else if (reqUrl.protocol === 'https:') { + reqPort = 443; + } + // Format the request hostname and hostname with port + let upperReqHosts = [reqUrl.hostname.toUpperCase()]; + if (typeof reqPort === 'number') { + upperReqHosts.push(`${upperReqHosts[0]}:${reqPort}`); + } + // Compare request host against noproxy + for (let upperNoProxyItem of noProxy + .split(',') + .map(x => x.trim().toUpperCase()) + .filter(x => x)) { + if (upperReqHosts.some(x => x === upperNoProxyItem)) { + return 
true; + } + } + return false; +} +exports.checkBypass = checkBypass; + + +/***/ }), + +/***/ 34889: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +var _a; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const assert_1 = __nccwpck_require__(39491); +const fs = __nccwpck_require__(57147); +const path = __nccwpck_require__(71017); +_a = fs.promises, exports.chmod = _a.chmod, exports.copyFile = _a.copyFile, exports.lstat = _a.lstat, exports.mkdir = _a.mkdir, exports.readdir = _a.readdir, exports.readlink = _a.readlink, exports.rename = _a.rename, exports.rmdir = _a.rmdir, exports.stat = _a.stat, exports.symlink = _a.symlink, exports.unlink = _a.unlink; +exports.IS_WINDOWS = process.platform === 'win32'; +function exists(fsPath) { + return __awaiter(this, void 0, void 0, function* () { + try { + yield exports.stat(fsPath); + } + catch (err) { + if (err.code === 'ENOENT') { + return false; + } + throw err; + } + return true; + }); +} +exports.exists = exists; +function isDirectory(fsPath, useStat = false) { + return __awaiter(this, void 0, void 0, function* () { + const stats = useStat ? 
yield exports.stat(fsPath) : yield exports.lstat(fsPath); + return stats.isDirectory(); + }); +} +exports.isDirectory = isDirectory; +/** + * On OSX/Linux, true if path starts with '/'. On Windows, true for paths like: + * \, \hello, \\hello\share, C:, and C:\hello (and corresponding alternate separator cases). + */ +function isRooted(p) { + p = normalizeSeparators(p); + if (!p) { + throw new Error('isRooted() parameter "p" cannot be empty'); + } + if (exports.IS_WINDOWS) { + return (p.startsWith('\\') || /^[A-Z]:/i.test(p) // e.g. \ or \hello or \\hello + ); // e.g. C: or C:\hello + } + return p.startsWith('/'); +} +exports.isRooted = isRooted; +/** + * Recursively create a directory at `fsPath`. + * + * This implementation is optimistic, meaning it attempts to create the full + * path first, and backs up the path stack from there. + * + * @param fsPath The path to create + * @param maxDepth The maximum recursion depth + * @param depth The current recursion depth + */ +function mkdirP(fsPath, maxDepth = 1000, depth = 1) { + return __awaiter(this, void 0, void 0, function* () { + assert_1.ok(fsPath, 'a path argument must be provided'); + fsPath = path.resolve(fsPath); + if (depth >= maxDepth) + return exports.mkdir(fsPath); + try { + yield exports.mkdir(fsPath); + return; + } + catch (err) { + switch (err.code) { + case 'ENOENT': { + yield mkdirP(path.dirname(fsPath), maxDepth, depth + 1); + yield exports.mkdir(fsPath); + return; + } + default: { + let stats; + try { + stats = yield exports.stat(fsPath); + } + catch (err2) { + throw err; + } + if (!stats.isDirectory()) + throw err; + } + } + } + }); +} +exports.mkdirP = mkdirP; +/** + * Best effort attempt to determine whether a file exists and is executable. + * @param filePath file path to check + * @param extensions additional file extensions to try + * @return if file exists and is executable, returns the file path. otherwise empty string. 
+ */ +function tryGetExecutablePath(filePath, extensions) { + return __awaiter(this, void 0, void 0, function* () { + let stats = undefined; + try { + // test file exists + stats = yield exports.stat(filePath); + } + catch (err) { + if (err.code !== 'ENOENT') { + // eslint-disable-next-line no-console + console.log(`Unexpected error attempting to determine if executable file exists '${filePath}': ${err}`); + } + } + if (stats && stats.isFile()) { + if (exports.IS_WINDOWS) { + // on Windows, test for valid extension + const upperExt = path.extname(filePath).toUpperCase(); + if (extensions.some(validExt => validExt.toUpperCase() === upperExt)) { + return filePath; + } + } + else { + if (isUnixExecutable(stats)) { + return filePath; + } + } + } + // try each extension + const originalFilePath = filePath; + for (const extension of extensions) { + filePath = originalFilePath + extension; + stats = undefined; + try { + stats = yield exports.stat(filePath); + } + catch (err) { + if (err.code !== 'ENOENT') { + // eslint-disable-next-line no-console + console.log(`Unexpected error attempting to determine if executable file exists '${filePath}': ${err}`); + } + } + if (stats && stats.isFile()) { + if (exports.IS_WINDOWS) { + // preserve the case of the actual file (since an extension was appended) + try { + const directory = path.dirname(filePath); + const upperName = path.basename(filePath).toUpperCase(); + for (const actualName of yield exports.readdir(directory)) { + if (upperName === actualName.toUpperCase()) { + filePath = path.join(directory, actualName); + break; + } + } + } + catch (err) { + // eslint-disable-next-line no-console + console.log(`Unexpected error attempting to determine the actual case of the file '${filePath}': ${err}`); + } + return filePath; + } + else { + if (isUnixExecutable(stats)) { + return filePath; + } + } + } + } + return ''; + }); +} +exports.tryGetExecutablePath = tryGetExecutablePath; +function normalizeSeparators(p) { + p = p || ''; + if 
(exports.IS_WINDOWS) { + // convert slashes on Windows + p = p.replace(/\//g, '\\'); + // remove redundant slashes + return p.replace(/\\\\+/g, '\\'); + } + // remove redundant slashes + return p.replace(/\/\/+/g, '/'); +} +// on Mac/Linux, test the execute bit +// R W X R W X R W X +// 256 128 64 32 16 8 4 2 1 +function isUnixExecutable(stats) { + return ((stats.mode & 1) > 0 || + ((stats.mode & 8) > 0 && stats.gid === process.getgid()) || + ((stats.mode & 64) > 0 && stats.uid === process.getuid())); +} +//# sourceMappingURL=io-util.js.map + +/***/ }), + +/***/ 27692: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const childProcess = __nccwpck_require__(32081); +const path = __nccwpck_require__(71017); +const util_1 = __nccwpck_require__(73837); +const ioUtil = __nccwpck_require__(34889); +const exec = util_1.promisify(childProcess.exec); +/** + * Copies a file or folder. + * Based off of shelljs - https://github.com/shelljs/shelljs/blob/9237f66c52e5daa40458f94f9565e18e8132f5a6/src/cp.js + * + * @param source source path + * @param dest destination path + * @param options optional. See CopyOptions. 
+ */ +function cp(source, dest, options = {}) { + return __awaiter(this, void 0, void 0, function* () { + const { force, recursive } = readCopyOptions(options); + const destStat = (yield ioUtil.exists(dest)) ? yield ioUtil.stat(dest) : null; + // Dest is an existing file, but not forcing + if (destStat && destStat.isFile() && !force) { + return; + } + // If dest is an existing directory, should copy inside. + const newDest = destStat && destStat.isDirectory() + ? path.join(dest, path.basename(source)) + : dest; + if (!(yield ioUtil.exists(source))) { + throw new Error(`no such file or directory: ${source}`); + } + const sourceStat = yield ioUtil.stat(source); + if (sourceStat.isDirectory()) { + if (!recursive) { + throw new Error(`Failed to copy. ${source} is a directory, but tried to copy without recursive flag.`); + } + else { + yield cpDirRecursive(source, newDest, 0, force); + } + } + else { + if (path.relative(source, newDest) === '') { + // a file cannot be copied to itself + throw new Error(`'${newDest}' and '${source}' are the same file`); + } + yield copyFile(source, newDest, force); + } + }); +} +exports.cp = cp; +/** + * Moves a path. + * + * @param source source path + * @param dest destination path + * @param options optional. See MoveOptions. 
+ */ +function mv(source, dest, options = {}) { + return __awaiter(this, void 0, void 0, function* () { + if (yield ioUtil.exists(dest)) { + let destExists = true; + if (yield ioUtil.isDirectory(dest)) { + // If dest is directory copy src into dest + dest = path.join(dest, path.basename(source)); + destExists = yield ioUtil.exists(dest); + } + if (destExists) { + if (options.force == null || options.force) { + yield rmRF(dest); + } + else { + throw new Error('Destination already exists'); + } + } + } + yield mkdirP(path.dirname(dest)); + yield ioUtil.rename(source, dest); + }); +} +exports.mv = mv; +/** + * Remove a path recursively with force + * + * @param inputPath path to remove + */ +function rmRF(inputPath) { + return __awaiter(this, void 0, void 0, function* () { + if (ioUtil.IS_WINDOWS) { + // Node doesn't provide a delete operation, only an unlink function. This means that if the file is being used by another + // program (e.g. antivirus), it won't be deleted. To address this, we shell out the work to rd/del. 
+ try { + if (yield ioUtil.isDirectory(inputPath, true)) { + yield exec(`rd /s /q "${inputPath}"`); + } + else { + yield exec(`del /f /a "${inputPath}"`); + } + } + catch (err) { + // if you try to delete a file that doesn't exist, desired result is achieved + // other errors are valid + if (err.code !== 'ENOENT') + throw err; + } + // Shelling out fails to remove a symlink folder with missing source, this unlink catches that + try { + yield ioUtil.unlink(inputPath); + } + catch (err) { + // if you try to delete a file that doesn't exist, desired result is achieved + // other errors are valid + if (err.code !== 'ENOENT') + throw err; + } + } + else { + let isDir = false; + try { + isDir = yield ioUtil.isDirectory(inputPath); + } + catch (err) { + // if you try to delete a file that doesn't exist, desired result is achieved + // other errors are valid + if (err.code !== 'ENOENT') + throw err; + return; + } + if (isDir) { + yield exec(`rm -rf "${inputPath}"`); + } + else { + yield ioUtil.unlink(inputPath); + } + } + }); +} +exports.rmRF = rmRF; +/** + * Make a directory. Creates the full path with folders in between + * Will throw if it fails + * + * @param fsPath path to create + * @returns Promise + */ +function mkdirP(fsPath) { + return __awaiter(this, void 0, void 0, function* () { + yield ioUtil.mkdirP(fsPath); + }); +} +exports.mkdirP = mkdirP; +/** + * Returns path of a tool had the tool actually been invoked. Resolves via paths. + * If you check and the tool does not exist, it will throw. + * + * @param tool name of the tool + * @param check whether to check if tool exists + * @returns Promise path to tool + */ +function which(tool, check) { + return __awaiter(this, void 0, void 0, function* () { + if (!tool) { + throw new Error("parameter 'tool' is required"); + } + // recursive when check=true + if (check) { + const result = yield which(tool, false); + if (!result) { + if (ioUtil.IS_WINDOWS) { + throw new Error(`Unable to locate executable file: ${tool}. 
Please verify either the file path exists or the file can be found within a directory specified by the PATH environment variable. Also verify the file has a valid extension for an executable file.`); + } + else { + throw new Error(`Unable to locate executable file: ${tool}. Please verify either the file path exists or the file can be found within a directory specified by the PATH environment variable. Also check the file mode to verify the file is executable.`); + } + } + } + try { + // build the list of extensions to try + const extensions = []; + if (ioUtil.IS_WINDOWS && process.env.PATHEXT) { + for (const extension of process.env.PATHEXT.split(path.delimiter)) { + if (extension) { + extensions.push(extension); + } + } + } + // if it's rooted, return it if exists. otherwise return empty. + if (ioUtil.isRooted(tool)) { + const filePath = yield ioUtil.tryGetExecutablePath(tool, extensions); + if (filePath) { + return filePath; + } + return ''; + } + // if any path separators, return empty + if (tool.includes('/') || (ioUtil.IS_WINDOWS && tool.includes('\\'))) { + return ''; + } + // build the list of directories + // + // Note, technically "where" checks the current directory on Windows. From a toolkit perspective, + // it feels like we should not do this. Checking the current directory seems like more of a use + // case of a shell, and the which() function exposed by the toolkit should strive for consistency + // across platforms. 
+ const directories = []; + if (process.env.PATH) { + for (const p of process.env.PATH.split(path.delimiter)) { + if (p) { + directories.push(p); + } + } + } + // return the first match + for (const directory of directories) { + const filePath = yield ioUtil.tryGetExecutablePath(directory + path.sep + tool, extensions); + if (filePath) { + return filePath; + } + } + return ''; + } + catch (err) { + throw new Error(`which failed with message ${err.message}`); + } + }); +} +exports.which = which; +function readCopyOptions(options) { + const force = options.force == null ? true : options.force; + const recursive = Boolean(options.recursive); + return { force, recursive }; +} +function cpDirRecursive(sourceDir, destDir, currentDepth, force) { + return __awaiter(this, void 0, void 0, function* () { + // Ensure there is not a run away recursive copy + if (currentDepth >= 255) + return; + currentDepth++; + yield mkdirP(destDir); + const files = yield ioUtil.readdir(sourceDir); + for (const fileName of files) { + const srcFile = `${sourceDir}/${fileName}`; + const destFile = `${destDir}/${fileName}`; + const srcFileStat = yield ioUtil.lstat(srcFile); + if (srcFileStat.isDirectory()) { + // Recurse + yield cpDirRecursive(srcFile, destFile, currentDepth, force); + } + else { + yield copyFile(srcFile, destFile, force); + } + } + // Change the mode for the newly created directory + yield ioUtil.chmod(destDir, (yield ioUtil.stat(sourceDir)).mode); + }); +} +// Buffered file copy +function copyFile(srcFile, destFile, force) { + return __awaiter(this, void 0, void 0, function* () { + if ((yield ioUtil.lstat(srcFile)).isSymbolicLink()) { + // unlink/re-link it + try { + yield ioUtil.lstat(destFile); + yield ioUtil.unlink(destFile); + } + catch (e) { + // Try to override file permission + if (e.code === 'EPERM') { + yield ioUtil.chmod(destFile, '0666'); + yield ioUtil.unlink(destFile); + } + // other errors = it doesn't exist, no work to do + } + // Copy over symlink + const 
symlinkFull = yield ioUtil.readlink(srcFile); + yield ioUtil.symlink(symlinkFull, destFile, ioUtil.IS_WINDOWS ? 'junction' : null); + } + else if (!(yield ioUtil.exists(destFile)) || force) { + yield ioUtil.copyFile(srcFile, destFile); + } + }); +} +//# sourceMappingURL=io.js.map + +/***/ }), + +/***/ 28788: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; + + +var compileSchema = __nccwpck_require__(93066) + , resolve = __nccwpck_require__(64119) + , Cache = __nccwpck_require__(79012) + , SchemaObject = __nccwpck_require__(37966) + , stableStringify = __nccwpck_require__(24643) + , formats = __nccwpck_require__(95552) + , rules = __nccwpck_require__(52568) + , $dataMetaSchema = __nccwpck_require__(83368) + , util = __nccwpck_require__(84656); + +module.exports = Ajv; + +Ajv.prototype.validate = validate; +Ajv.prototype.compile = compile; +Ajv.prototype.addSchema = addSchema; +Ajv.prototype.addMetaSchema = addMetaSchema; +Ajv.prototype.validateSchema = validateSchema; +Ajv.prototype.getSchema = getSchema; +Ajv.prototype.removeSchema = removeSchema; +Ajv.prototype.addFormat = addFormat; +Ajv.prototype.errorsText = errorsText; + +Ajv.prototype._addSchema = _addSchema; +Ajv.prototype._compile = _compile; + +Ajv.prototype.compileAsync = __nccwpck_require__(45335); +var customKeyword = __nccwpck_require__(77824); +Ajv.prototype.addKeyword = customKeyword.add; +Ajv.prototype.getKeyword = customKeyword.get; +Ajv.prototype.removeKeyword = customKeyword.remove; +Ajv.prototype.validateKeyword = customKeyword.validate; + +var errorClasses = __nccwpck_require__(55034); +Ajv.ValidationError = errorClasses.Validation; +Ajv.MissingRefError = errorClasses.MissingRef; +Ajv.$dataMetaSchema = $dataMetaSchema; + +var META_SCHEMA_ID = 'http://json-schema.org/draft-07/schema'; + +var META_IGNORE_OPTIONS = [ 'removeAdditional', 'useDefaults', 'coerceTypes', 'strictDefaults' ]; +var META_SUPPORT_DATA = ['/properties']; + +/** + * Creates validator 
instance. + * Usage: `Ajv(opts)` + * @param {Object} opts optional options + * @return {Object} ajv instance + */ +function Ajv(opts) { + if (!(this instanceof Ajv)) return new Ajv(opts); + opts = this._opts = util.copy(opts) || {}; + setLogger(this); + this._schemas = {}; + this._refs = {}; + this._fragments = {}; + this._formats = formats(opts.format); + + this._cache = opts.cache || new Cache; + this._loadingSchemas = {}; + this._compilations = []; + this.RULES = rules(); + this._getId = chooseGetId(opts); + + opts.loopRequired = opts.loopRequired || Infinity; + if (opts.errorDataPath == 'property') opts._errorDataPathProperty = true; + if (opts.serialize === undefined) opts.serialize = stableStringify; + this._metaOpts = getMetaSchemaOptions(this); + + if (opts.formats) addInitialFormats(this); + if (opts.keywords) addInitialKeywords(this); + addDefaultMetaSchema(this); + if (typeof opts.meta == 'object') this.addMetaSchema(opts.meta); + if (opts.nullable) this.addKeyword('nullable', {metaSchema: {type: 'boolean'}}); + addInitialSchemas(this); +} + + + +/** + * Validate data using schema + * Schema will be compiled and cached (using serialized JSON as key. [fast-json-stable-stringify](https://github.com/epoberezkin/fast-json-stable-stringify) is used to serialize. + * @this Ajv + * @param {String|Object} schemaKeyRef key, ref or schema object + * @param {Any} data to be validated + * @return {Boolean} validation result. Errors from the last validation will be available in `ajv.errors` (and also in compiled schema: `schema.errors`). 
+ */ +function validate(schemaKeyRef, data) { + var v; + if (typeof schemaKeyRef == 'string') { + v = this.getSchema(schemaKeyRef); + if (!v) throw new Error('no schema with key or ref "' + schemaKeyRef + '"'); + } else { + var schemaObj = this._addSchema(schemaKeyRef); + v = schemaObj.validate || this._compile(schemaObj); + } + + var valid = v(data); + if (v.$async !== true) this.errors = v.errors; + return valid; +} + + +/** + * Create validating function for passed schema. + * @this Ajv + * @param {Object} schema schema object + * @param {Boolean} _meta true if schema is a meta-schema. Used internally to compile meta schemas of custom keywords. + * @return {Function} validating function + */ +function compile(schema, _meta) { + var schemaObj = this._addSchema(schema, undefined, _meta); + return schemaObj.validate || this._compile(schemaObj); +} + + +/** + * Adds schema to the instance. + * @this Ajv + * @param {Object|Array} schema schema or array of schemas. If array is passed, `key` and other parameters will be ignored. + * @param {String} key Optional schema key. Can be passed to `validate` method instead of schema object or id/ref. One schema per instance can have empty `id` and `key`. + * @param {Boolean} _skipValidation true to skip schema validation. Used internally, option validateSchema should be used instead. + * @param {Boolean} _meta true if schema is a meta-schema. Used internally, addMetaSchema should be used instead. + * @return {Ajv} this for method chaining + */ +function addSchema(schema, key, _skipValidation, _meta) { + if (Array.isArray(schema)){ + for (var i=0; i} errors optional array of validation errors, if not passed errors from the instance are used. + * @param {Object} options optional options with properties `separator` and `dataVar`. 
+ * @return {String} human readable string with all errors descriptions + */ +function errorsText(errors, options) { + errors = errors || this.errors; + if (!errors) return 'No errors'; + options = options || {}; + var separator = options.separator === undefined ? ', ' : options.separator; + var dataVar = options.dataVar === undefined ? 'data' : options.dataVar; + + var text = ''; + for (var i=0; i { + +"use strict"; + + + +var Cache = module.exports = function Cache() { + this._cache = {}; +}; + + +Cache.prototype.put = function Cache_put(key, value) { + this._cache[key] = value; +}; + + +Cache.prototype.get = function Cache_get(key) { + return this._cache[key]; +}; + + +Cache.prototype.del = function Cache_del(key) { + delete this._cache[key]; +}; + + +Cache.prototype.clear = function Cache_clear() { + this._cache = {}; +}; + + +/***/ }), + +/***/ 45335: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; + + +var MissingRefError = (__nccwpck_require__(55034).MissingRef); + +module.exports = compileAsync; + + +/** + * Creates validating function for passed schema with asynchronous loading of missing schemas. + * `loadSchema` option should be a function that accepts schema uri and returns promise that resolves with the schema. + * @this Ajv + * @param {Object} schema schema object + * @param {Boolean} meta optional true to compile meta-schema; this parameter can be skipped + * @param {Function} callback an optional node-style callback, it is called with 2 parameters: error (or null) and validating function. + * @return {Promise} promise that resolves with a validating function. 
+ */ +function compileAsync(schema, meta, callback) { + /* eslint no-shadow: 0 */ + /* global Promise */ + /* jshint validthis: true */ + var self = this; + if (typeof this._opts.loadSchema != 'function') + throw new Error('options.loadSchema should be a function'); + + if (typeof meta == 'function') { + callback = meta; + meta = undefined; + } + + var p = loadMetaSchemaOf(schema).then(function () { + var schemaObj = self._addSchema(schema, undefined, meta); + return schemaObj.validate || _compileAsync(schemaObj); + }); + + if (callback) { + p.then( + function(v) { callback(null, v); }, + callback + ); + } + + return p; + + + function loadMetaSchemaOf(sch) { + var $schema = sch.$schema; + return $schema && !self.getSchema($schema) + ? compileAsync.call(self, { $ref: $schema }, true) + : Promise.resolve(); + } + + + function _compileAsync(schemaObj) { + try { return self._compile(schemaObj); } + catch(e) { + if (e instanceof MissingRefError) return loadMissingSchema(e); + throw e; + } + + + function loadMissingSchema(e) { + var ref = e.missingSchema; + if (added(ref)) throw new Error('Schema ' + ref + ' is loaded but ' + e.missingRef + ' cannot be resolved'); + + var schemaPromise = self._loadingSchemas[ref]; + if (!schemaPromise) { + schemaPromise = self._loadingSchemas[ref] = self._opts.loadSchema(ref); + schemaPromise.then(removePromise, removePromise); + } + + return schemaPromise.then(function (sch) { + if (!added(ref)) { + return loadMetaSchemaOf(sch).then(function () { + if (!added(ref)) self.addSchema(sch, ref, undefined, meta); + }); + } + }).then(function() { + return _compileAsync(schemaObj); + }); + + function removePromise() { + delete self._loadingSchemas[ref]; + } + + function added(ref) { + return self._refs[ref] || self._schemas[ref]; + } + } + } +} + + +/***/ }), + +/***/ 55034: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; + + +var resolve = __nccwpck_require__(64119); + +module.exports = { + Validation: 
errorSubclass(ValidationError), + MissingRef: errorSubclass(MissingRefError) +}; + + +function ValidationError(errors) { + this.message = 'validation failed'; + this.errors = errors; + this.ajv = this.validation = true; +} + + +MissingRefError.message = function (baseId, ref) { + return 'can\'t resolve reference ' + ref + ' from id ' + baseId; +}; + + +function MissingRefError(baseId, ref, message) { + this.message = message || MissingRefError.message(baseId, ref); + this.missingRef = resolve.url(baseId, ref); + this.missingSchema = resolve.normalizeId(resolve.fullPath(this.missingRef)); +} + + +function errorSubclass(Subclass) { + Subclass.prototype = Object.create(Error.prototype); + Subclass.prototype.constructor = Subclass; + return Subclass; +} + + +/***/ }), + +/***/ 95552: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; + + +var util = __nccwpck_require__(84656); + +var DATE = /^(\d\d\d\d)-(\d\d)-(\d\d)$/; +var DAYS = [0,31,28,31,30,31,30,31,31,30,31,30,31]; +var TIME = /^(\d\d):(\d\d):(\d\d)(\.\d+)?(z|[+-]\d\d(?::?\d\d)?)?$/i; +var HOSTNAME = /^(?=.{1,253}\.?$)[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?(?:\.[a-z0-9](?:[-0-9a-z]{0,61}[0-9a-z])?)*\.?$/i; +var URI = 
/^(?:[a-z][a-z0-9+\-.]*:)(?:\/?\/(?:(?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9a-f]{2})*@)?(?:\[(?:(?:(?:(?:[0-9a-f]{1,4}:){6}|::(?:[0-9a-f]{1,4}:){5}|(?:[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){4}|(?:(?:[0-9a-f]{1,4}:){0,1}[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){3}|(?:(?:[0-9a-f]{1,4}:){0,2}[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){2}|(?:(?:[0-9a-f]{1,4}:){0,3}[0-9a-f]{1,4})?::[0-9a-f]{1,4}:|(?:(?:[0-9a-f]{1,4}:){0,4}[0-9a-f]{1,4})?::)(?:[0-9a-f]{1,4}:[0-9a-f]{1,4}|(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?))|(?:(?:[0-9a-f]{1,4}:){0,5}[0-9a-f]{1,4})?::[0-9a-f]{1,4}|(?:(?:[0-9a-f]{1,4}:){0,6}[0-9a-f]{1,4})?::)|[Vv][0-9a-f]+\.[a-z0-9\-._~!$&'()*+,;=:]+)\]|(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)|(?:[a-z0-9\-._~!$&'()*+,;=]|%[0-9a-f]{2})*)(?::\d*)?(?:\/(?:[a-z0-9\-._~!$&'()*+,;=:@]|%[0-9a-f]{2})*)*|\/(?:(?:[a-z0-9\-._~!$&'()*+,;=:@]|%[0-9a-f]{2})+(?:\/(?:[a-z0-9\-._~!$&'()*+,;=:@]|%[0-9a-f]{2})*)*)?|(?:[a-z0-9\-._~!$&'()*+,;=:@]|%[0-9a-f]{2})+(?:\/(?:[a-z0-9\-._~!$&'()*+,;=:@]|%[0-9a-f]{2})*)*)(?:\?(?:[a-z0-9\-._~!$&'()*+,;=:@/?]|%[0-9a-f]{2})*)?(?:#(?:[a-z0-9\-._~!$&'()*+,;=:@/?]|%[0-9a-f]{2})*)?$/i; +var URIREF = 
/^(?:[a-z][a-z0-9+\-.]*:)?(?:\/?\/(?:(?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9a-f]{2})*@)?(?:\[(?:(?:(?:(?:[0-9a-f]{1,4}:){6}|::(?:[0-9a-f]{1,4}:){5}|(?:[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){4}|(?:(?:[0-9a-f]{1,4}:){0,1}[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){3}|(?:(?:[0-9a-f]{1,4}:){0,2}[0-9a-f]{1,4})?::(?:[0-9a-f]{1,4}:){2}|(?:(?:[0-9a-f]{1,4}:){0,3}[0-9a-f]{1,4})?::[0-9a-f]{1,4}:|(?:(?:[0-9a-f]{1,4}:){0,4}[0-9a-f]{1,4})?::)(?:[0-9a-f]{1,4}:[0-9a-f]{1,4}|(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?))|(?:(?:[0-9a-f]{1,4}:){0,5}[0-9a-f]{1,4})?::[0-9a-f]{1,4}|(?:(?:[0-9a-f]{1,4}:){0,6}[0-9a-f]{1,4})?::)|[Vv][0-9a-f]+\.[a-z0-9\-._~!$&'()*+,;=:]+)\]|(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)|(?:[a-z0-9\-._~!$&'"()*+,;=]|%[0-9a-f]{2})*)(?::\d*)?(?:\/(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})*)*|\/(?:(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})+(?:\/(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})*)*)?|(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})+(?:\/(?:[a-z0-9\-._~!$&'"()*+,;=:@]|%[0-9a-f]{2})*)*)?(?:\?(?:[a-z0-9\-._~!$&'"()*+,;=:@/?]|%[0-9a-f]{2})*)?(?:#(?:[a-z0-9\-._~!$&'"()*+,;=:@/?]|%[0-9a-f]{2})*)?$/i; +// uri-template: https://tools.ietf.org/html/rfc6570 +var URITEMPLATE = /^(?:(?:[^\x00-\x20"'<>%\\^`{|}]|%[0-9a-f]{2})|\{[+#./;?&=,!@|]?(?:[a-z0-9_]|%[0-9a-f]{2})+(?::[1-9][0-9]{0,3}|\*)?(?:,(?:[a-z0-9_]|%[0-9a-f]{2})+(?::[1-9][0-9]{0,3}|\*)?)*\})*$/i; +// For the source: https://gist.github.com/dperini/729294 +// For test cases: https://mathiasbynens.be/demo/url-regex +// @todo Delete current URL in favour of the commented out URL rule when this issue is fixed https://github.com/eslint/eslint/issues/7983. 
+// var URL = /^(?:(?:https?|ftp):\/\/)(?:\S+(?::\S*)?@)?(?:(?!10(?:\.\d{1,3}){3})(?!127(?:\.\d{1,3}){3})(?!169\.254(?:\.\d{1,3}){2})(?!192\.168(?:\.\d{1,3}){2})(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(?:(?:[a-z\u{00a1}-\u{ffff}0-9]+-?)*[a-z\u{00a1}-\u{ffff}0-9]+)(?:\.(?:[a-z\u{00a1}-\u{ffff}0-9]+-?)*[a-z\u{00a1}-\u{ffff}0-9]+)*(?:\.(?:[a-z\u{00a1}-\u{ffff}]{2,})))(?::\d{2,5})?(?:\/[^\s]*)?$/iu; +var URL = /^(?:(?:http[s\u017F]?|ftp):\/\/)(?:(?:[\0-\x08\x0E-\x1F!-\x9F\xA1-\u167F\u1681-\u1FFF\u200B-\u2027\u202A-\u202E\u2030-\u205E\u2060-\u2FFF\u3001-\uD7FF\uE000-\uFEFE\uFF00-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+(?::(?:[\0-\x08\x0E-\x1F!-\x9F\xA1-\u167F\u1681-\u1FFF\u200B-\u2027\u202A-\u202E\u2030-\u205E\u2060-\u2FFF\u3001-\uD7FF\uE000-\uFEFE\uFF00-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])*)?@)?(?:(?!10(?:\.[0-9]{1,3}){3})(?!127(?:\.[0-9]{1,3}){3})(?!169\.254(?:\.[0-9]{1,3}){2})(?!192\.168(?:\.[0-9]{1,3}){2})(?!172\.(?:1[6-9]|2[0-9]|3[01])(?:\.[0-9]{1,3}){2})(?:[1-9][0-9]?|1[0-9][0-9]|2[01][0-9]|22[0-3])(?:\.(?:1?[0-9]{1,2}|2[0-4][0-9]|25[0-5])){2}(?:\.(?:[1-9][0-9]?|1[0-9][0-9]|2[0-4][0-9]|25[0-4]))|(?:(?:(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+-?)*(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+)(?:\.(?:(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+-?)*(?:[0-9KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])+)*(?:\.(?:(?:[KSa-z\xA1-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF]){2,})))(?::[0-9]{2,5})?(?:\
/(?:[\0-\x08\x0E-\x1F!-\x9F\xA1-\u167F\u1681-\u1FFF\u200B-\u2027\u202A-\u202E\u2030-\u205E\u2060-\u2FFF\u3001-\uD7FF\uE000-\uFEFE\uFF00-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF])*)?$/i; +var UUID = /^(?:urn:uuid:)?[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12}$/i; +var JSON_POINTER = /^(?:\/(?:[^~/]|~0|~1)*)*$/; +var JSON_POINTER_URI_FRAGMENT = /^#(?:\/(?:[a-z0-9_\-.!$&'()*+,;:=@]|%[0-9a-f]{2}|~0|~1)*)*$/i; +var RELATIVE_JSON_POINTER = /^(?:0|[1-9][0-9]*)(?:#|(?:\/(?:[^~/]|~0|~1)*)*)$/; + + +module.exports = formats; + +function formats(mode) { + mode = mode == 'full' ? 'full' : 'fast'; + return util.copy(formats[mode]); +} + + +formats.fast = { + // date: http://tools.ietf.org/html/rfc3339#section-5.6 + date: /^\d\d\d\d-[0-1]\d-[0-3]\d$/, + // date-time: http://tools.ietf.org/html/rfc3339#section-5.6 + time: /^(?:[0-2]\d:[0-5]\d:[0-5]\d|23:59:60)(?:\.\d+)?(?:z|[+-]\d\d(?::?\d\d)?)?$/i, + 'date-time': /^\d\d\d\d-[0-1]\d-[0-3]\d[t\s](?:[0-2]\d:[0-5]\d:[0-5]\d|23:59:60)(?:\.\d+)?(?:z|[+-]\d\d(?::?\d\d)?)$/i, + // uri: https://github.com/mafintosh/is-my-json-valid/blob/master/formats.js + uri: /^(?:[a-z][a-z0-9+-.]*:)(?:\/?\/)?[^\s]*$/i, + 'uri-reference': /^(?:(?:[a-z][a-z0-9+-.]*:)?\/?\/)?(?:[^\\\s#][^\s#]*)?(?:#[^\\\s]*)?$/i, + 'uri-template': URITEMPLATE, + url: URL, + // email (sources from jsen validator): + // http://stackoverflow.com/questions/201323/using-a-regular-expression-to-validate-an-email-address#answer-8829363 + // http://www.w3.org/TR/html5/forms.html#valid-e-mail-address (search for 'willful violation') + email: /^[a-z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?(?:\.[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?)*$/i, + hostname: HOSTNAME, + // optimized https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9780596802837/ch07s16.html + ipv4: /^(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)$/, + // optimized 
http://stackoverflow.com/questions/53497/regular-expression-that-matches-valid-ipv6-addresses + ipv6: /^\s*(?:(?:(?:[0-9a-f]{1,4}:){7}(?:[0-9a-f]{1,4}|:))|(?:(?:[0-9a-f]{1,4}:){6}(?::[0-9a-f]{1,4}|(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){5}(?:(?:(?::[0-9a-f]{1,4}){1,2})|:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){4}(?:(?:(?::[0-9a-f]{1,4}){1,3})|(?:(?::[0-9a-f]{1,4})?:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){3}(?:(?:(?::[0-9a-f]{1,4}){1,4})|(?:(?::[0-9a-f]{1,4}){0,2}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){2}(?:(?:(?::[0-9a-f]{1,4}){1,5})|(?:(?::[0-9a-f]{1,4}){0,3}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){1}(?:(?:(?::[0-9a-f]{1,4}){1,6})|(?:(?::[0-9a-f]{1,4}){0,4}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?::(?:(?:(?::[0-9a-f]{1,4}){1,7})|(?:(?::[0-9a-f]{1,4}){0,5}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(?:%.+)?\s*$/i, + regex: regex, + // uuid: http://tools.ietf.org/html/rfc4122 + uuid: UUID, + // JSON-pointer: https://tools.ietf.org/html/rfc6901 + // uri fragment: https://tools.ietf.org/html/rfc3986#appendix-A + 'json-pointer': JSON_POINTER, + 'json-pointer-uri-fragment': JSON_POINTER_URI_FRAGMENT, + // relative JSON-pointer: http://tools.ietf.org/html/draft-luff-relative-json-pointer-00 + 'relative-json-pointer': RELATIVE_JSON_POINTER +}; + + +formats.full = { + date: date, + time: time, + 'date-time': date_time, + uri: uri, + 'uri-reference': URIREF, + 'uri-template': URITEMPLATE, + url: URL, + email: 
/^[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?$/i, + hostname: HOSTNAME, + ipv4: /^(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)$/, + ipv6: /^\s*(?:(?:(?:[0-9a-f]{1,4}:){7}(?:[0-9a-f]{1,4}|:))|(?:(?:[0-9a-f]{1,4}:){6}(?::[0-9a-f]{1,4}|(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){5}(?:(?:(?::[0-9a-f]{1,4}){1,2})|:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(?:(?:[0-9a-f]{1,4}:){4}(?:(?:(?::[0-9a-f]{1,4}){1,3})|(?:(?::[0-9a-f]{1,4})?:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){3}(?:(?:(?::[0-9a-f]{1,4}){1,4})|(?:(?::[0-9a-f]{1,4}){0,2}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){2}(?:(?:(?::[0-9a-f]{1,4}){1,5})|(?:(?::[0-9a-f]{1,4}){0,3}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?:(?:[0-9a-f]{1,4}:){1}(?:(?:(?::[0-9a-f]{1,4}){1,6})|(?:(?::[0-9a-f]{1,4}){0,4}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(?::(?:(?:(?::[0-9a-f]{1,4}){1,7})|(?:(?::[0-9a-f]{1,4}){0,5}:(?:(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(?:%.+)?\s*$/i, + regex: regex, + uuid: UUID, + 'json-pointer': JSON_POINTER, + 'json-pointer-uri-fragment': JSON_POINTER_URI_FRAGMENT, + 'relative-json-pointer': RELATIVE_JSON_POINTER +}; + + +function isLeapYear(year) { + // https://tools.ietf.org/html/rfc3339#appendix-C + return year % 4 === 0 && (year % 100 !== 0 || year % 400 === 0); +} + + +function date(str) { + // full-date from http://tools.ietf.org/html/rfc3339#section-5.6 + var matches = str.match(DATE); + if (!matches) return false; + + var year = +matches[1]; + var month = +matches[2]; + var day = +matches[3]; + + 
return month >= 1 && month <= 12 && day >= 1 && + day <= (month == 2 && isLeapYear(year) ? 29 : DAYS[month]); +} + + +function time(str, full) { + var matches = str.match(TIME); + if (!matches) return false; + + var hour = matches[1]; + var minute = matches[2]; + var second = matches[3]; + var timeZone = matches[5]; + return ((hour <= 23 && minute <= 59 && second <= 59) || + (hour == 23 && minute == 59 && second == 60)) && + (!full || timeZone); +} + + +var DATE_TIME_SEPARATOR = /t|\s/i; +function date_time(str) { + // http://tools.ietf.org/html/rfc3339#section-5.6 + var dateTime = str.split(DATE_TIME_SEPARATOR); + return dateTime.length == 2 && date(dateTime[0]) && time(dateTime[1], true); +} + + +var NOT_URI_FRAGMENT = /\/|:/; +function uri(str) { + // http://jmrware.com/articles/2009/uri_regexp/URI_regex.html + optional protocol + required "." + return NOT_URI_FRAGMENT.test(str) && URI.test(str); +} + + +var Z_ANCHOR = /[^\\]\\Z/; +function regex(str) { + if (Z_ANCHOR.test(str)) return false; + try { + new RegExp(str); + return true; + } catch(e) { + return false; + } +} + + +/***/ }), + +/***/ 93066: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; + + +var resolve = __nccwpck_require__(64119) + , util = __nccwpck_require__(84656) + , errorClasses = __nccwpck_require__(55034) + , stableStringify = __nccwpck_require__(24643); + +var validateGenerator = __nccwpck_require__(17302); + +/** + * Functions below are used inside compiled validations function + */ + +var ucs2length = util.ucs2length; +var equal = __nccwpck_require__(15584); + +// this error is thrown by async schemas to return validation errors via exception +var ValidationError = errorClasses.Validation; + +module.exports = compile; + + +/** + * Compiles schema to validation function + * @this Ajv + * @param {Object} schema schema object + * @param {Object} root object with information about the root schema for this schema + * @param {Object} localRefs the hash of 
local references inside the schema (created by resolve.id), used for inline resolution + * @param {String} baseId base ID for IDs in the schema + * @return {Function} validation function + */ +function compile(schema, root, localRefs, baseId) { + /* jshint validthis: true, evil: true */ + /* eslint no-shadow: 0 */ + var self = this + , opts = this._opts + , refVal = [ undefined ] + , refs = {} + , patterns = [] + , patternsHash = {} + , defaults = [] + , defaultsHash = {} + , customRules = []; + + root = root || { schema: schema, refVal: refVal, refs: refs }; + + var c = checkCompiling.call(this, schema, root, baseId); + var compilation = this._compilations[c.index]; + if (c.compiling) return (compilation.callValidate = callValidate); + + var formats = this._formats; + var RULES = this.RULES; + + try { + var v = localCompile(schema, root, localRefs, baseId); + compilation.validate = v; + var cv = compilation.callValidate; + if (cv) { + cv.schema = v.schema; + cv.errors = null; + cv.refs = v.refs; + cv.refVal = v.refVal; + cv.root = v.root; + cv.$async = v.$async; + if (opts.sourceCode) cv.source = v.source; + } + return v; + } finally { + endCompiling.call(this, schema, root, baseId); + } + + /* @this {*} - custom context, see passContext option */ + function callValidate() { + /* jshint validthis: true */ + var validate = compilation.validate; + var result = validate.apply(this, arguments); + callValidate.errors = validate.errors; + return result; + } + + function localCompile(_schema, _root, localRefs, baseId) { + var isRoot = !_root || (_root && _root.schema == _schema); + if (_root.schema != root.schema) + return compile.call(self, _schema, _root, localRefs, baseId); + + var $async = _schema.$async === true; + + var sourceCode = validateGenerator({ + isTop: true, + schema: _schema, + isRoot: isRoot, + baseId: baseId, + root: _root, + schemaPath: '', + errSchemaPath: '#', + errorPath: '""', + MissingRefError: errorClasses.MissingRef, + RULES: RULES, + validate: 
validateGenerator, + util: util, + resolve: resolve, + resolveRef: resolveRef, + usePattern: usePattern, + useDefault: useDefault, + useCustomRule: useCustomRule, + opts: opts, + formats: formats, + logger: self.logger, + self: self + }); + + sourceCode = vars(refVal, refValCode) + vars(patterns, patternCode) + + vars(defaults, defaultCode) + vars(customRules, customRuleCode) + + sourceCode; + + if (opts.processCode) sourceCode = opts.processCode(sourceCode, _schema); + // console.log('\n\n\n *** \n', JSON.stringify(sourceCode)); + var validate; + try { + var makeValidate = new Function( + 'self', + 'RULES', + 'formats', + 'root', + 'refVal', + 'defaults', + 'customRules', + 'equal', + 'ucs2length', + 'ValidationError', + sourceCode + ); + + validate = makeValidate( + self, + RULES, + formats, + root, + refVal, + defaults, + customRules, + equal, + ucs2length, + ValidationError + ); + + refVal[0] = validate; + } catch(e) { + self.logger.error('Error compiling schema, function code:', sourceCode); + throw e; + } + + validate.schema = _schema; + validate.errors = null; + validate.refs = refs; + validate.refVal = refVal; + validate.root = isRoot ? 
validate : _root; + if ($async) validate.$async = true; + if (opts.sourceCode === true) { + validate.source = { + code: sourceCode, + patterns: patterns, + defaults: defaults + }; + } + + return validate; + } + + function resolveRef(baseId, ref, isRoot) { + ref = resolve.url(baseId, ref); + var refIndex = refs[ref]; + var _refVal, refCode; + if (refIndex !== undefined) { + _refVal = refVal[refIndex]; + refCode = 'refVal[' + refIndex + ']'; + return resolvedRef(_refVal, refCode); + } + if (!isRoot && root.refs) { + var rootRefId = root.refs[ref]; + if (rootRefId !== undefined) { + _refVal = root.refVal[rootRefId]; + refCode = addLocalRef(ref, _refVal); + return resolvedRef(_refVal, refCode); + } + } + + refCode = addLocalRef(ref); + var v = resolve.call(self, localCompile, root, ref); + if (v === undefined) { + var localSchema = localRefs && localRefs[ref]; + if (localSchema) { + v = resolve.inlineRef(localSchema, opts.inlineRefs) + ? localSchema + : compile.call(self, localSchema, root, localRefs, baseId); + } + } + + if (v === undefined) { + removeLocalRef(ref); + } else { + replaceLocalRef(ref, v); + return resolvedRef(v, refCode); + } + } + + function addLocalRef(ref, v) { + var refId = refVal.length; + refVal[refId] = v; + refs[ref] = refId; + return 'refVal' + refId; + } + + function removeLocalRef(ref) { + delete refs[ref]; + } + + function replaceLocalRef(ref, v) { + var refId = refs[ref]; + refVal[refId] = v; + } + + function resolvedRef(refVal, code) { + return typeof refVal == 'object' || typeof refVal == 'boolean' + ? 
{ code: code, schema: refVal, inline: true } + : { code: code, $async: refVal && !!refVal.$async }; + } + + function usePattern(regexStr) { + var index = patternsHash[regexStr]; + if (index === undefined) { + index = patternsHash[regexStr] = patterns.length; + patterns[index] = regexStr; + } + return 'pattern' + index; + } + + function useDefault(value) { + switch (typeof value) { + case 'boolean': + case 'number': + return '' + value; + case 'string': + return util.toQuotedString(value); + case 'object': + if (value === null) return 'null'; + var valueStr = stableStringify(value); + var index = defaultsHash[valueStr]; + if (index === undefined) { + index = defaultsHash[valueStr] = defaults.length; + defaults[index] = value; + } + return 'default' + index; + } + } + + function useCustomRule(rule, schema, parentSchema, it) { + if (self._opts.validateSchema !== false) { + var deps = rule.definition.dependencies; + if (deps && !deps.every(function(keyword) { + return Object.prototype.hasOwnProperty.call(parentSchema, keyword); + })) + throw new Error('parent schema must have all required keywords: ' + deps.join(',')); + + var validateSchema = rule.definition.validateSchema; + if (validateSchema) { + var valid = validateSchema(schema); + if (!valid) { + var message = 'keyword schema is invalid: ' + self.errorsText(validateSchema.errors); + if (self._opts.validateSchema == 'log') self.logger.error(message); + else throw new Error(message); + } + } + } + + var compile = rule.definition.compile + , inline = rule.definition.inline + , macro = rule.definition.macro; + + var validate; + if (compile) { + validate = compile.call(self, schema, parentSchema, it); + } else if (macro) { + validate = macro.call(self, schema, parentSchema, it); + if (opts.validateSchema !== false) self.validateSchema(validate, true); + } else if (inline) { + validate = inline.call(self, it, rule.keyword, schema, parentSchema); + } else { + validate = rule.definition.validate; + if (!validate) 
return; + } + + if (validate === undefined) + throw new Error('custom keyword "' + rule.keyword + '"failed to compile'); + + var index = customRules.length; + customRules[index] = validate; + + return { + code: 'customRule' + index, + validate: validate + }; + } +} + + +/** + * Checks if the schema is currently compiled + * @this Ajv + * @param {Object} schema schema to compile + * @param {Object} root root object + * @param {String} baseId base schema ID + * @return {Object} object with properties "index" (compilation index) and "compiling" (boolean) + */ +function checkCompiling(schema, root, baseId) { + /* jshint validthis: true */ + var index = compIndex.call(this, schema, root, baseId); + if (index >= 0) return { index: index, compiling: true }; + index = this._compilations.length; + this._compilations[index] = { + schema: schema, + root: root, + baseId: baseId + }; + return { index: index, compiling: false }; +} + + +/** + * Removes the schema from the currently compiled list + * @this Ajv + * @param {Object} schema schema to compile + * @param {Object} root root object + * @param {String} baseId base schema ID + */ +function endCompiling(schema, root, baseId) { + /* jshint validthis: true */ + var i = compIndex.call(this, schema, root, baseId); + if (i >= 0) this._compilations.splice(i, 1); +} + + +/** + * Index of schema compilation in the currently compiled list + * @this Ajv + * @param {Object} schema schema to compile + * @param {Object} root root object + * @param {String} baseId base schema ID + * @return {Integer} compilation index + */ +function compIndex(schema, root, baseId) { + /* jshint validthis: true */ + for (var i=0; i { + +"use strict"; + + +var URI = __nccwpck_require__(39222) + , equal = __nccwpck_require__(15584) + , util = __nccwpck_require__(84656) + , SchemaObject = __nccwpck_require__(37966) + , traverse = __nccwpck_require__(82141); + +module.exports = resolve; + +resolve.normalizeId = normalizeId; +resolve.fullPath = getFullPath; 
+resolve.url = resolveUrl; +resolve.ids = resolveIds; +resolve.inlineRef = inlineRef; +resolve.schema = resolveSchema; + +/** + * [resolve and compile the references ($ref)] + * @this Ajv + * @param {Function} compile reference to schema compilation funciton (localCompile) + * @param {Object} root object with information about the root schema for the current schema + * @param {String} ref reference to resolve + * @return {Object|Function} schema object (if the schema can be inlined) or validation function + */ +function resolve(compile, root, ref) { + /* jshint validthis: true */ + var refVal = this._refs[ref]; + if (typeof refVal == 'string') { + if (this._refs[refVal]) refVal = this._refs[refVal]; + else return resolve.call(this, compile, root, refVal); + } + + refVal = refVal || this._schemas[ref]; + if (refVal instanceof SchemaObject) { + return inlineRef(refVal.schema, this._opts.inlineRefs) + ? refVal.schema + : refVal.validate || this._compile(refVal); + } + + var res = resolveSchema.call(this, root, ref); + var schema, v, baseId; + if (res) { + schema = res.schema; + root = res.root; + baseId = res.baseId; + } + + if (schema instanceof SchemaObject) { + v = schema.validate || compile.call(this, schema.schema, root, undefined, baseId); + } else if (schema !== undefined) { + v = inlineRef(schema, this._opts.inlineRefs) + ? 
schema + : compile.call(this, schema, root, undefined, baseId); + } + + return v; +} + + +/** + * Resolve schema, its root and baseId + * @this Ajv + * @param {Object} root root object with properties schema, refVal, refs + * @param {String} ref reference to resolve + * @return {Object} object with properties schema, root, baseId + */ +function resolveSchema(root, ref) { + /* jshint validthis: true */ + var p = URI.parse(ref) + , refPath = _getFullPath(p) + , baseId = getFullPath(this._getId(root.schema)); + if (Object.keys(root.schema).length === 0 || refPath !== baseId) { + var id = normalizeId(refPath); + var refVal = this._refs[id]; + if (typeof refVal == 'string') { + return resolveRecursive.call(this, root, refVal, p); + } else if (refVal instanceof SchemaObject) { + if (!refVal.validate) this._compile(refVal); + root = refVal; + } else { + refVal = this._schemas[id]; + if (refVal instanceof SchemaObject) { + if (!refVal.validate) this._compile(refVal); + if (id == normalizeId(ref)) + return { schema: refVal, root: root, baseId: baseId }; + root = refVal; + } else { + return; + } + } + if (!root.schema) return; + baseId = getFullPath(this._getId(root.schema)); + } + return getJsonPointer.call(this, p, baseId, root.schema, root); +} + + +/* @this Ajv */ +function resolveRecursive(root, ref, parsedRef) { + /* jshint validthis: true */ + var res = resolveSchema.call(this, root, ref); + if (res) { + var schema = res.schema; + var baseId = res.baseId; + root = res.root; + var id = this._getId(schema); + if (id) baseId = resolveUrl(baseId, id); + return getJsonPointer.call(this, parsedRef, baseId, schema, root); + } +} + + +var PREVENT_SCOPE_CHANGE = util.toHash(['properties', 'patternProperties', 'enum', 'dependencies', 'definitions']); +/* @this Ajv */ +function getJsonPointer(parsedRef, baseId, schema, root) { + /* jshint validthis: true */ + parsedRef.fragment = parsedRef.fragment || ''; + if (parsedRef.fragment.slice(0,1) != '/') return; + var parts = 
parsedRef.fragment.split('/'); + + for (var i = 1; i < parts.length; i++) { + var part = parts[i]; + if (part) { + part = util.unescapeFragment(part); + schema = schema[part]; + if (schema === undefined) break; + var id; + if (!PREVENT_SCOPE_CHANGE[part]) { + id = this._getId(schema); + if (id) baseId = resolveUrl(baseId, id); + if (schema.$ref) { + var $ref = resolveUrl(baseId, schema.$ref); + var res = resolveSchema.call(this, root, $ref); + if (res) { + schema = res.schema; + root = res.root; + baseId = res.baseId; + } + } + } + } + } + if (schema !== undefined && schema !== root.schema) + return { schema: schema, root: root, baseId: baseId }; +} + + +var SIMPLE_INLINED = util.toHash([ + 'type', 'format', 'pattern', + 'maxLength', 'minLength', + 'maxProperties', 'minProperties', + 'maxItems', 'minItems', + 'maximum', 'minimum', + 'uniqueItems', 'multipleOf', + 'required', 'enum' +]); +function inlineRef(schema, limit) { + if (limit === false) return false; + if (limit === undefined || limit === true) return checkNoRef(schema); + else if (limit) return countKeys(schema) <= limit; +} + + +function checkNoRef(schema) { + var item; + if (Array.isArray(schema)) { + for (var i=0; i { + +"use strict"; + + +var ruleModules = __nccwpck_require__(63829) + , toHash = (__nccwpck_require__(84656).toHash); + +module.exports = function rules() { + var RULES = [ + { type: 'number', + rules: [ { 'maximum': ['exclusiveMaximum'] }, + { 'minimum': ['exclusiveMinimum'] }, 'multipleOf', 'format'] }, + { type: 'string', + rules: [ 'maxLength', 'minLength', 'pattern', 'format' ] }, + { type: 'array', + rules: [ 'maxItems', 'minItems', 'items', 'contains', 'uniqueItems' ] }, + { type: 'object', + rules: [ 'maxProperties', 'minProperties', 'required', 'dependencies', 'propertyNames', + { 'properties': ['additionalProperties', 'patternProperties'] } ] }, + { rules: [ '$ref', 'const', 'enum', 'not', 'anyOf', 'oneOf', 'allOf', 'if' ] } + ]; + + var ALL = [ 'type', '$comment' ]; + var 
KEYWORDS = [ + '$schema', '$id', 'id', '$data', '$async', 'title', + 'description', 'default', 'definitions', + 'examples', 'readOnly', 'writeOnly', + 'contentMediaType', 'contentEncoding', + 'additionalItems', 'then', 'else' + ]; + var TYPES = [ 'number', 'integer', 'string', 'array', 'object', 'boolean', 'null' ]; + RULES.all = toHash(ALL); + RULES.types = toHash(TYPES); + + RULES.forEach(function (group) { + group.rules = group.rules.map(function (keyword) { + var implKeywords; + if (typeof keyword == 'object') { + var key = Object.keys(keyword)[0]; + implKeywords = keyword[key]; + keyword = key; + implKeywords.forEach(function (k) { + ALL.push(k); + RULES.all[k] = true; + }); + } + ALL.push(keyword); + var rule = RULES.all[keyword] = { + keyword: keyword, + code: ruleModules[keyword], + implements: implKeywords + }; + return rule; + }); + + RULES.all.$comment = { + keyword: '$comment', + code: ruleModules.$comment + }; + + if (group.type) RULES.types[group.type] = group; + }); + + RULES.keywords = toHash(ALL.concat(KEYWORDS)); + RULES.custom = {}; + + return RULES; +}; + + +/***/ }), + +/***/ 37966: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; + + +var util = __nccwpck_require__(84656); + +module.exports = SchemaObject; + +function SchemaObject(obj) { + util.copy(obj, this); +} + + +/***/ }), + +/***/ 89308: +/***/ ((module) => { + +"use strict"; + + +// https://mathiasbynens.be/notes/javascript-encoding +// https://github.com/bestiejs/punycode.js - punycode.ucs2.decode +module.exports = function ucs2length(str) { + var length = 0 + , len = str.length + , pos = 0 + , value; + while (pos < len) { + length++; + value = str.charCodeAt(pos++); + if (value >= 0xD800 && value <= 0xDBFF && pos < len) { + // high surrogate, and there is a next character + value = str.charCodeAt(pos); + if ((value & 0xFC00) == 0xDC00) pos++; // low surrogate + } + } + return length; +}; + + +/***/ }), + +/***/ 84656: +/***/ ((module, 
__unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; + + + +module.exports = { + copy: copy, + checkDataType: checkDataType, + checkDataTypes: checkDataTypes, + coerceToTypes: coerceToTypes, + toHash: toHash, + getProperty: getProperty, + escapeQuotes: escapeQuotes, + equal: __nccwpck_require__(15584), + ucs2length: __nccwpck_require__(89308), + varOccurences: varOccurences, + varReplace: varReplace, + schemaHasRules: schemaHasRules, + schemaHasRulesExcept: schemaHasRulesExcept, + schemaUnknownRules: schemaUnknownRules, + toQuotedString: toQuotedString, + getPathExpr: getPathExpr, + getPath: getPath, + getData: getData, + unescapeFragment: unescapeFragment, + unescapeJsonPointer: unescapeJsonPointer, + escapeFragment: escapeFragment, + escapeJsonPointer: escapeJsonPointer +}; + + +function copy(o, to) { + to = to || {}; + for (var key in o) to[key] = o[key]; + return to; +} + + +function checkDataType(dataType, data, strictNumbers, negate) { + var EQUAL = negate ? ' !== ' : ' === ' + , AND = negate ? ' || ' : ' && ' + , OK = negate ? '!' : '' + , NOT = negate ? '' : '!'; + switch (dataType) { + case 'null': return data + EQUAL + 'null'; + case 'array': return OK + 'Array.isArray(' + data + ')'; + case 'object': return '(' + OK + data + AND + + 'typeof ' + data + EQUAL + '"object"' + AND + + NOT + 'Array.isArray(' + data + '))'; + case 'integer': return '(typeof ' + data + EQUAL + '"number"' + AND + + NOT + '(' + data + ' % 1)' + + AND + data + EQUAL + data + + (strictNumbers ? (AND + OK + 'isFinite(' + data + ')') : '') + ')'; + case 'number': return '(typeof ' + data + EQUAL + '"' + dataType + '"' + + (strictNumbers ? 
(AND + OK + 'isFinite(' + data + ')') : '') + ')'; + default: return 'typeof ' + data + EQUAL + '"' + dataType + '"'; + } +} + + +function checkDataTypes(dataTypes, data, strictNumbers) { + switch (dataTypes.length) { + case 1: return checkDataType(dataTypes[0], data, strictNumbers, true); + default: + var code = ''; + var types = toHash(dataTypes); + if (types.array && types.object) { + code = types.null ? '(': '(!' + data + ' || '; + code += 'typeof ' + data + ' !== "object")'; + delete types.null; + delete types.array; + delete types.object; + } + if (types.number) delete types.integer; + for (var t in types) + code += (code ? ' && ' : '' ) + checkDataType(t, data, strictNumbers, true); + + return code; + } +} + + +var COERCE_TO_TYPES = toHash([ 'string', 'number', 'integer', 'boolean', 'null' ]); +function coerceToTypes(optionCoerceTypes, dataTypes) { + if (Array.isArray(dataTypes)) { + var types = []; + for (var i=0; i= lvl) throw new Error('Cannot access property/index ' + up + ' levels up, current level is ' + lvl); + return paths[lvl - up]; + } + + if (up > lvl) throw new Error('Cannot access data ' + up + ' levels up, current level is ' + lvl); + data = 'data' + ((lvl - up) || ''); + if (!jsonPointer) return data; + } + + var expr = data; + var segments = jsonPointer.split('/'); + for (var i=0; i { + +"use strict"; + + +var KEYWORDS = [ + 'multipleOf', + 'maximum', + 'exclusiveMaximum', + 'minimum', + 'exclusiveMinimum', + 'maxLength', + 'minLength', + 'pattern', + 'additionalItems', + 'maxItems', + 'minItems', + 'uniqueItems', + 'maxProperties', + 'minProperties', + 'required', + 'additionalProperties', + 'enum', + 'format', + 'const' +]; + +module.exports = function (metaSchema, keywordsJsonPointers) { + for (var i=0; i { + +"use strict"; + + +var metaSchema = __nccwpck_require__(6680); + +module.exports = { + $id: 'https://github.com/ajv-validator/ajv/blob/master/lib/definition_schema.js', + definitions: { + simpleTypes: 
metaSchema.definitions.simpleTypes + }, + type: 'object', + dependencies: { + schema: ['validate'], + $data: ['validate'], + statements: ['inline'], + valid: {not: {required: ['macro']}} + }, + properties: { + type: metaSchema.properties.type, + schema: {type: 'boolean'}, + statements: {type: 'boolean'}, + dependencies: { + type: 'array', + items: {type: 'string'} + }, + metaSchema: {type: 'object'}, + modifying: {type: 'boolean'}, + valid: {type: 'boolean'}, + $data: {type: 'boolean'}, + async: {type: 'boolean'}, + errors: { + anyOf: [ + {type: 'boolean'}, + {const: 'full'} + ] + } + } +}; + + +/***/ }), + +/***/ 68979: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate__limit(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $isMax = $keyword == 'maximum', + $exclusiveKeyword = $isMax ? 'exclusiveMaximum' : 'exclusiveMinimum', + $schemaExcl = it.schema[$exclusiveKeyword], + $isDataExcl = it.opts.$data && $schemaExcl && $schemaExcl.$data, + $op = $isMax ? '<' : '>', + $notOp = $isMax ? 
'>' : '<', + $errorKeyword = undefined; + if (!($isData || typeof $schema == 'number' || $schema === undefined)) { + throw new Error($keyword + ' must be number'); + } + if (!($isDataExcl || $schemaExcl === undefined || typeof $schemaExcl == 'number' || typeof $schemaExcl == 'boolean')) { + throw new Error($exclusiveKeyword + ' must be number or boolean'); + } + if ($isDataExcl) { + var $schemaValueExcl = it.util.getData($schemaExcl.$data, $dataLvl, it.dataPathArr), + $exclusive = 'exclusive' + $lvl, + $exclType = 'exclType' + $lvl, + $exclIsNumber = 'exclIsNumber' + $lvl, + $opExpr = 'op' + $lvl, + $opStr = '\' + ' + $opExpr + ' + \''; + out += ' var schemaExcl' + ($lvl) + ' = ' + ($schemaValueExcl) + '; '; + $schemaValueExcl = 'schemaExcl' + $lvl; + out += ' var ' + ($exclusive) + '; var ' + ($exclType) + ' = typeof ' + ($schemaValueExcl) + '; if (' + ($exclType) + ' != \'boolean\' && ' + ($exclType) + ' != \'undefined\' && ' + ($exclType) + ' != \'number\') { '; + var $errorKeyword = $exclusiveKeyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || '_exclusiveLimit') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'' + ($exclusiveKeyword) + ' should be boolean\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = 
' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + out += ' ' + ($exclType) + ' == \'number\' ? ( (' + ($exclusive) + ' = ' + ($schemaValue) + ' === undefined || ' + ($schemaValueExcl) + ' ' + ($op) + '= ' + ($schemaValue) + ') ? ' + ($data) + ' ' + ($notOp) + '= ' + ($schemaValueExcl) + ' : ' + ($data) + ' ' + ($notOp) + ' ' + ($schemaValue) + ' ) : ( (' + ($exclusive) + ' = ' + ($schemaValueExcl) + ' === true) ? ' + ($data) + ' ' + ($notOp) + '= ' + ($schemaValue) + ' : ' + ($data) + ' ' + ($notOp) + ' ' + ($schemaValue) + ' ) || ' + ($data) + ' !== ' + ($data) + ') { var op' + ($lvl) + ' = ' + ($exclusive) + ' ? \'' + ($op) + '\' : \'' + ($op) + '=\'; '; + if ($schema === undefined) { + $errorKeyword = $exclusiveKeyword; + $errSchemaPath = it.errSchemaPath + '/' + $exclusiveKeyword; + $schemaValue = $schemaValueExcl; + $isData = $isDataExcl; + } + } else { + var $exclIsNumber = typeof $schemaExcl == 'number', + $opStr = $op; + if ($exclIsNumber && $isData) { + var $opExpr = '\'' + $opStr + '\''; + out += ' if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + out += ' ( ' + ($schemaValue) + ' === undefined || ' + ($schemaExcl) + ' ' + ($op) + '= ' + ($schemaValue) + ' ? ' + ($data) + ' ' + ($notOp) + '= ' + ($schemaExcl) + ' : ' + ($data) + ' ' + ($notOp) + ' ' + ($schemaValue) + ' ) || ' + ($data) + ' !== ' + ($data) + ') { '; + } else { + if ($exclIsNumber && $schema === undefined) { + $exclusive = true; + $errorKeyword = $exclusiveKeyword; + $errSchemaPath = it.errSchemaPath + '/' + $exclusiveKeyword; + $schemaValue = $schemaExcl; + $notOp += '='; + } else { + if ($exclIsNumber) $schemaValue = Math[$isMax ? 'min' : 'max']($schemaExcl, $schema); + if ($schemaExcl === ($exclIsNumber ? 
$schemaValue : true)) { + $exclusive = true; + $errorKeyword = $exclusiveKeyword; + $errSchemaPath = it.errSchemaPath + '/' + $exclusiveKeyword; + $notOp += '='; + } else { + $exclusive = false; + $opStr += '='; + } + } + var $opExpr = '\'' + $opStr + '\''; + out += ' if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + out += ' ' + ($data) + ' ' + ($notOp) + ' ' + ($schemaValue) + ' || ' + ($data) + ' !== ' + ($data) + ') { '; + } + } + $errorKeyword = $errorKeyword || $keyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || '_limit') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { comparison: ' + ($opExpr) + ', limit: ' + ($schemaValue) + ', exclusive: ' + ($exclusive) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be ' + ($opStr) + ' '; + if ($isData) { + out += '\' + ' + ($schemaValue); + } else { + out += '' + ($schemaValue) + '\''; + } + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + + +/***/ }), + +/***/ 2825: 
+/***/ ((module) => { + +"use strict"; + +module.exports = function generate__limitItems(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + if (!($isData || typeof $schema == 'number')) { + throw new Error($keyword + ' must be number'); + } + var $op = $keyword == 'maxItems' ? '>' : '<'; + out += 'if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + out += ' ' + ($data) + '.length ' + ($op) + ' ' + ($schemaValue) + ') { '; + var $errorKeyword = $keyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || '_limitItems') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { limit: ' + ($schemaValue) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT have '; + if ($keyword == 'maxItems') { + out += 'more'; + } else { + out += 'fewer'; + } + out += ' than '; + if ($isData) { + out += '\' + ' + ($schemaValue) + ' + \''; + } else { + out += '' + ($schema); + } + out += ' items\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: 
validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += '} '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + + +/***/ }), + +/***/ 61810: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate__limitLength(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + if (!($isData || typeof $schema == 'number')) { + throw new Error($keyword + ' must be number'); + } + var $op = $keyword == 'maxLength' ? 
'>' : '<'; + out += 'if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + if (it.opts.unicode === false) { + out += ' ' + ($data) + '.length '; + } else { + out += ' ucs2length(' + ($data) + ') '; + } + out += ' ' + ($op) + ' ' + ($schemaValue) + ') { '; + var $errorKeyword = $keyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || '_limitLength') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { limit: ' + ($schemaValue) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT be '; + if ($keyword == 'maxLength') { + out += 'longer'; + } else { + out += 'shorter'; + } + out += ' than '; + if ($isData) { + out += '\' + ' + ($schemaValue) + ' + \''; + } else { + out += '' + ($schema); + } + out += ' characters\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += '} '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + + +/***/ }), + +/***/ 47881: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate__limitProperties(it, $keyword, $ruleType) { + var out = ' '; 
+ var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + if (!($isData || typeof $schema == 'number')) { + throw new Error($keyword + ' must be number'); + } + var $op = $keyword == 'maxProperties' ? '>' : '<'; + out += 'if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'number\') || '; + } + out += ' Object.keys(' + ($data) + ').length ' + ($op) + ' ' + ($schemaValue) + ') { '; + var $errorKeyword = $keyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || '_limitProperties') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { limit: ' + ($schemaValue) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT have '; + if ($keyword == 'maxProperties') { + out += 'more'; + } else { + out += 'fewer'; + } + out += ' than '; + if ($isData) { + out += '\' + ' + ($schemaValue) + ' + \''; + } else { + out += '' + ($schema); + } + out += ' properties\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + 
} + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += '} '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + + +/***/ }), + +/***/ 49301: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_allOf(it, $keyword, $ruleType) { + var out = ' '; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $currentBaseId = $it.baseId, + $allSchemasEmpty = true; + var arr1 = $schema; + if (arr1) { + var $sch, $i = -1, + l1 = arr1.length - 1; + while ($i < l1) { + $sch = arr1[$i += 1]; + if ((it.opts.strictKeywords ? 
typeof $sch == 'object' && Object.keys($sch).length > 0 : it.util.schemaHasRules($sch, it.RULES.all))) { + $allSchemasEmpty = false; + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } + } + if ($breakOnError) { + if ($allSchemasEmpty) { + out += ' if (true) { '; + } else { + out += ' ' + ($closingBraces.slice(0, -1)) + ' '; + } + } + return out; +} + + +/***/ }), + +/***/ 29490: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_anyOf(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $noEmptySchema = $schema.every(function($sch) { + return (it.opts.strictKeywords ? 
typeof $sch == 'object' && Object.keys($sch).length > 0 : it.util.schemaHasRules($sch, it.RULES.all)); + }); + if ($noEmptySchema) { + var $currentBaseId = $it.baseId; + out += ' var ' + ($errs) + ' = errors; var ' + ($valid) + ' = false; '; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + var arr1 = $schema; + if (arr1) { + var $sch, $i = -1, + l1 = arr1.length - 1; + while ($i < l1) { + $sch = arr1[$i += 1]; + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + out += ' ' + ($valid) + ' = ' + ($valid) + ' || ' + ($nextValid) + '; if (!' + ($valid) + ') { '; + $closingBraces += '}'; + } + } + it.compositeRule = $it.compositeRule = $wasComposite; + out += ' ' + ($closingBraces) + ' if (!' + ($valid) + ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('anyOf') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'should match some schema in anyOf\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError(vErrors); '; + } else { + out += ' validate.errors = vErrors; return false; '; + } + } + out += ' } else { errors = ' + ($errs) + '; if (vErrors !== null) { if (' + ($errs) + ') vErrors.length = ' + ($errs) + '; else vErrors = null; } '; + if (it.opts.allErrors) { + out += ' } '; + } + } else { + if ($breakOnError) 
{ + out += ' if (true) { '; + } + } + return out; +} + + +/***/ }), + +/***/ 74587: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_comment(it, $keyword, $ruleType) { + var out = ' '; + var $schema = it.schema[$keyword]; + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $comment = it.util.toQuotedString($schema); + if (it.opts.$comment === true) { + out += ' console.log(' + ($comment) + ');'; + } else if (typeof it.opts.$comment == 'function') { + out += ' self._opts.$comment(' + ($comment) + ', ' + (it.util.toQuotedString($errSchemaPath)) + ', validate.root.schema);'; + } + return out; +} + + +/***/ }), + +/***/ 36691: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_const(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + if (!$isData) { + out += ' var schema' + ($lvl) + ' = validate.schema' + ($schemaPath) + ';'; + } + out += 'var ' + ($valid) + ' = equal(' + ($data) + ', schema' + ($lvl) + '); if (!' 
+ ($valid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('const') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { allowedValue: schema' + ($lvl) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be equal to constant\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' }'; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + + +/***/ }), + +/***/ 86603: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_contains(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $idx = 'i' + $lvl, + $dataNxt = $it.dataLevel = it.dataLevel + 1, + $nextData = 'data' + $dataNxt, + $currentBaseId = it.baseId, + $nonEmptySchema = (it.opts.strictKeywords ? 
typeof $schema == 'object' && Object.keys($schema).length > 0 : it.util.schemaHasRules($schema, it.RULES.all)); + out += 'var ' + ($errs) + ' = errors;var ' + ($valid) + ';'; + if ($nonEmptySchema) { + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + out += ' var ' + ($nextValid) + ' = false; for (var ' + ($idx) + ' = 0; ' + ($idx) + ' < ' + ($data) + '.length; ' + ($idx) + '++) { '; + $it.errorPath = it.util.getPathExpr(it.errorPath, $idx, it.opts.jsonPointers, true); + var $passData = $data + '[' + $idx + ']'; + $it.dataPathArr[$dataNxt] = $idx; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + out += ' if (' + ($nextValid) + ') break; } '; + it.compositeRule = $it.compositeRule = $wasComposite; + out += ' ' + ($closingBraces) + ' if (!' 
+ ($nextValid) + ') {'; + } else { + out += ' if (' + ($data) + '.length == 0) {'; + } + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('contains') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'should contain a valid item\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else { '; + if ($nonEmptySchema) { + out += ' errors = ' + ($errs) + '; if (vErrors !== null) { if (' + ($errs) + ') vErrors.length = ' + ($errs) + '; else vErrors = null; } '; + } + if (it.opts.allErrors) { + out += ' } '; + } + return out; +} + + +/***/ }), + +/***/ 26773: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_custom(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if 
($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $rule = this, + $definition = 'definition' + $lvl, + $rDef = $rule.definition, + $closingBraces = ''; + var $compile, $inline, $macro, $ruleValidate, $validateCode; + if ($isData && $rDef.$data) { + $validateCode = 'keywordValidate' + $lvl; + var $validateSchema = $rDef.validateSchema; + out += ' var ' + ($definition) + ' = RULES.custom[\'' + ($keyword) + '\'].definition; var ' + ($validateCode) + ' = ' + ($definition) + '.validate;'; + } else { + $ruleValidate = it.useCustomRule($rule, $schema, it.schema, it); + if (!$ruleValidate) return; + $schemaValue = 'validate.schema' + $schemaPath; + $validateCode = $ruleValidate.code; + $compile = $rDef.compile; + $inline = $rDef.inline; + $macro = $rDef.macro; + } + var $ruleErrs = $validateCode + '.errors', + $i = 'i' + $lvl, + $ruleErr = 'ruleErr' + $lvl, + $asyncKeyword = $rDef.async; + if ($asyncKeyword && !it.async) throw new Error('async keyword in sync schema'); + if (!($inline || $macro)) { + out += '' + ($ruleErrs) + ' = null;'; + } + out += 'var ' + ($errs) + ' = errors;var ' + ($valid) + ';'; + if ($isData && $rDef.$data) { + $closingBraces += '}'; + out += ' if (' + ($schemaValue) + ' === undefined) { ' + ($valid) + ' = true; } else { '; + if ($validateSchema) { + $closingBraces += '}'; + out += ' ' + ($valid) + ' = ' + ($definition) + '.validateSchema(' + ($schemaValue) + '); if (' + ($valid) + ') { '; + } + } + if ($inline) { + if ($rDef.statements) { + out += ' ' + ($ruleValidate.validate) + ' '; + } else { + out += ' ' + ($valid) + ' = ' + ($ruleValidate.validate) + '; '; + } + } else if ($macro) { + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + $it.schema = $ruleValidate.validate; + $it.schemaPath = ''; + var $wasComposite = it.compositeRule; 
+ it.compositeRule = $it.compositeRule = true; + var $code = it.validate($it).replace(/validate\.schema/g, $validateCode); + it.compositeRule = $it.compositeRule = $wasComposite; + out += ' ' + ($code); + } else { + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; + out += ' ' + ($validateCode) + '.call( '; + if (it.opts.passContext) { + out += 'this'; + } else { + out += 'self'; + } + if ($compile || $rDef.schema === false) { + out += ' , ' + ($data) + ' '; + } else { + out += ' , ' + ($schemaValue) + ' , ' + ($data) + ' , validate.schema' + (it.schemaPath) + ' '; + } + out += ' , (dataPath || \'\')'; + if (it.errorPath != '""') { + out += ' + ' + (it.errorPath); + } + var $parentData = $dataLvl ? 'data' + (($dataLvl - 1) || '') : 'parentData', + $parentDataProperty = $dataLvl ? it.dataPathArr[$dataLvl] : 'parentDataProperty'; + out += ' , ' + ($parentData) + ' , ' + ($parentDataProperty) + ' , rootData ) '; + var def_callRuleValidate = out; + out = $$outStack.pop(); + if ($rDef.errors === false) { + out += ' ' + ($valid) + ' = '; + if ($asyncKeyword) { + out += 'await '; + } + out += '' + (def_callRuleValidate) + '; '; + } else { + if ($asyncKeyword) { + $ruleErrs = 'customErrors' + $lvl; + out += ' var ' + ($ruleErrs) + ' = null; try { ' + ($valid) + ' = await ' + (def_callRuleValidate) + '; } catch (e) { ' + ($valid) + ' = false; if (e instanceof ValidationError) ' + ($ruleErrs) + ' = e.errors; else throw e; } '; + } else { + out += ' ' + ($ruleErrs) + ' = null; ' + ($valid) + ' = ' + (def_callRuleValidate) + '; '; + } + } + } + if ($rDef.modifying) { + out += ' if (' + ($parentData) + ') ' + ($data) + ' = ' + ($parentData) + '[' + ($parentDataProperty) + '];'; + } + out += '' + ($closingBraces); + if ($rDef.valid) { + if ($breakOnError) { + out += ' if (true) { '; + } + } else { + out += ' if ( '; + if ($rDef.valid === undefined) { + out += ' !'; + if ($macro) { + out += '' + ($nextValid); + } else { + out += '' + ($valid); + } + } else { 
+ out += ' ' + (!$rDef.valid) + ' '; + } + out += ') { '; + $errorKeyword = $rule.keyword; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || 'custom') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { keyword: \'' + ($rule.keyword) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should pass "' + ($rule.keyword) + '" keyword validation\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + var def_customError = out; + out = $$outStack.pop(); + if ($inline) { + if ($rDef.errors) { + if ($rDef.errors != 'full') { + out += ' for (var ' + ($i) + '=' + ($errs) + '; ' + ($i) + ' { + +"use strict"; + +module.exports = function generate_dependencies(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var 
$nextValid = 'valid' + $it.level; + var $schemaDeps = {}, + $propertyDeps = {}, + $ownProperties = it.opts.ownProperties; + for ($property in $schema) { + if ($property == '__proto__') continue; + var $sch = $schema[$property]; + var $deps = Array.isArray($sch) ? $propertyDeps : $schemaDeps; + $deps[$property] = $sch; + } + out += 'var ' + ($errs) + ' = errors;'; + var $currentErrorPath = it.errorPath; + out += 'var missing' + ($lvl) + ';'; + for (var $property in $propertyDeps) { + $deps = $propertyDeps[$property]; + if ($deps.length) { + out += ' if ( ' + ($data) + (it.util.getProperty($property)) + ' !== undefined '; + if ($ownProperties) { + out += ' && Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($property)) + '\') '; + } + if ($breakOnError) { + out += ' && ( '; + var arr1 = $deps; + if (arr1) { + var $propertyKey, $i = -1, + l1 = arr1.length - 1; + while ($i < l1) { + $propertyKey = arr1[$i += 1]; + if ($i) { + out += ' || '; + } + var $prop = it.util.getProperty($propertyKey), + $useData = $data + $prop; + out += ' ( ( ' + ($useData) + ' === undefined '; + if ($ownProperties) { + out += ' || ! Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ') && (missing' + ($lvl) + ' = ' + (it.util.toQuotedString(it.opts.jsonPointers ? $propertyKey : $prop)) + ') ) '; + } + } + out += ')) { '; + var $propertyPath = 'missing' + $lvl, + $missingProperty = '\' + ' + $propertyPath + ' + \''; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.opts.jsonPointers ? 
it.util.getPathExpr($currentErrorPath, $propertyPath, true) : $currentErrorPath + ' + ' + $propertyPath; + } + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('dependencies') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { property: \'' + (it.util.escapeQuotes($property)) + '\', missingProperty: \'' + ($missingProperty) + '\', depsCount: ' + ($deps.length) + ', deps: \'' + (it.util.escapeQuotes($deps.length == 1 ? $deps[0] : $deps.join(", "))) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should have '; + if ($deps.length == 1) { + out += 'property ' + (it.util.escapeQuotes($deps[0])); + } else { + out += 'properties ' + (it.util.escapeQuotes($deps.join(", "))); + } + out += ' when property ' + (it.util.escapeQuotes($property)) + ' is present\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + } else { + out += ' ) { '; + var arr2 = $deps; + if (arr2) { + var $propertyKey, i2 = -1, + l2 = arr2.length - 1; + while (i2 < l2) { + $propertyKey = arr2[i2 += 1]; + var $prop = it.util.getProperty($propertyKey), + $missingProperty = it.util.escapeQuotes($propertyKey), + $useData = $data + $prop; + if (it.opts._errorDataPathProperty) { + it.errorPath = 
it.util.getPath($currentErrorPath, $propertyKey, it.opts.jsonPointers); + } + out += ' if ( ' + ($useData) + ' === undefined '; + if ($ownProperties) { + out += ' || ! Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('dependencies') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { property: \'' + (it.util.escapeQuotes($property)) + '\', missingProperty: \'' + ($missingProperty) + '\', depsCount: ' + ($deps.length) + ', deps: \'' + (it.util.escapeQuotes($deps.length == 1 ? $deps[0] : $deps.join(", "))) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should have '; + if ($deps.length == 1) { + out += 'property ' + (it.util.escapeQuotes($deps[0])); + } else { + out += 'properties ' + (it.util.escapeQuotes($deps.join(", "))); + } + out += ' when property ' + (it.util.escapeQuotes($property)) + ' is present\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } '; + } + } + } + out += ' } '; + if ($breakOnError) { + $closingBraces += '}'; + out += ' else { '; + } + } + } + it.errorPath = $currentErrorPath; + var $currentBaseId = $it.baseId; + for (var $property in $schemaDeps) { + var $sch = $schemaDeps[$property]; + if ((it.opts.strictKeywords ? 
typeof $sch == 'object' && Object.keys($sch).length > 0 : it.util.schemaHasRules($sch, it.RULES.all))) { + out += ' ' + ($nextValid) + ' = true; if ( ' + ($data) + (it.util.getProperty($property)) + ' !== undefined '; + if ($ownProperties) { + out += ' && Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($property)) + '\') '; + } + out += ') { '; + $it.schema = $sch; + $it.schemaPath = $schemaPath + it.util.getProperty($property); + $it.errSchemaPath = $errSchemaPath + '/' + it.util.escapeFragment($property); + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + out += ' } '; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } + if ($breakOnError) { + out += ' ' + ($closingBraces) + ' if (' + ($errs) + ' == errors) {'; + } + return out; +} + + +/***/ }), + +/***/ 76335: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_enum(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $i = 'i' + $lvl, + $vSchema = 'schema' + $lvl; + if (!$isData) { + out += ' var ' + ($vSchema) + ' = validate.schema' + ($schemaPath) + ';'; + } + out += 'var ' + ($valid) + ';'; + if ($isData) { + out += ' if (schema' + ($lvl) + ' === undefined) ' + ($valid) + ' = true; else if (!Array.isArray(schema' + ($lvl) + ')) ' + ($valid) + ' = false; else {'; + } + out += '' + 
($valid) + ' = false;for (var ' + ($i) + '=0; ' + ($i) + '<' + ($vSchema) + '.length; ' + ($i) + '++) if (equal(' + ($data) + ', ' + ($vSchema) + '[' + ($i) + '])) { ' + ($valid) + ' = true; break; }'; + if ($isData) { + out += ' } '; + } + out += ' if (!' + ($valid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('enum') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { allowedValues: schema' + ($lvl) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be equal to one of the allowed values\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' }'; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + + +/***/ }), + +/***/ 97952: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_format(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + if (it.opts.format === false) { + if ($breakOnError) { + out += ' if (true) { '; + } + return 
out; + } + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $unknownFormats = it.opts.unknownFormats, + $allowUnknown = Array.isArray($unknownFormats); + if ($isData) { + var $format = 'format' + $lvl, + $isObject = 'isObject' + $lvl, + $formatType = 'formatType' + $lvl; + out += ' var ' + ($format) + ' = formats[' + ($schemaValue) + ']; var ' + ($isObject) + ' = typeof ' + ($format) + ' == \'object\' && !(' + ($format) + ' instanceof RegExp) && ' + ($format) + '.validate; var ' + ($formatType) + ' = ' + ($isObject) + ' && ' + ($format) + '.type || \'string\'; if (' + ($isObject) + ') { '; + if (it.async) { + out += ' var async' + ($lvl) + ' = ' + ($format) + '.async; '; + } + out += ' ' + ($format) + ' = ' + ($format) + '.validate; } if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'string\') || '; + } + out += ' ('; + if ($unknownFormats != 'ignore') { + out += ' (' + ($schemaValue) + ' && !' + ($format) + ' '; + if ($allowUnknown) { + out += ' && self._opts.unknownFormats.indexOf(' + ($schemaValue) + ') == -1 '; + } + out += ') || '; + } + out += ' (' + ($format) + ' && ' + ($formatType) + ' == \'' + ($ruleType) + '\' && !(typeof ' + ($format) + ' == \'function\' ? '; + if (it.async) { + out += ' (async' + ($lvl) + ' ? 
await ' + ($format) + '(' + ($data) + ') : ' + ($format) + '(' + ($data) + ')) '; + } else { + out += ' ' + ($format) + '(' + ($data) + ') '; + } + out += ' : ' + ($format) + '.test(' + ($data) + '))))) {'; + } else { + var $format = it.formats[$schema]; + if (!$format) { + if ($unknownFormats == 'ignore') { + it.logger.warn('unknown format "' + $schema + '" ignored in schema at path "' + it.errSchemaPath + '"'); + if ($breakOnError) { + out += ' if (true) { '; + } + return out; + } else if ($allowUnknown && $unknownFormats.indexOf($schema) >= 0) { + if ($breakOnError) { + out += ' if (true) { '; + } + return out; + } else { + throw new Error('unknown format "' + $schema + '" is used in schema at path "' + it.errSchemaPath + '"'); + } + } + var $isObject = typeof $format == 'object' && !($format instanceof RegExp) && $format.validate; + var $formatType = $isObject && $format.type || 'string'; + if ($isObject) { + var $async = $format.async === true; + $format = $format.validate; + } + if ($formatType != $ruleType) { + if ($breakOnError) { + out += ' if (true) { '; + } + return out; + } + if ($async) { + if (!it.async) throw new Error('async format in sync schema'); + var $formatRef = 'formats' + it.util.getProperty($schema) + '.validate'; + out += ' if (!(await ' + ($formatRef) + '(' + ($data) + '))) { '; + } else { + out += ' if (! 
'; + var $formatRef = 'formats' + it.util.getProperty($schema); + if ($isObject) $formatRef += '.validate'; + if (typeof $format == 'function') { + out += ' ' + ($formatRef) + '(' + ($data) + ') '; + } else { + out += ' ' + ($formatRef) + '.test(' + ($data) + ') '; + } + out += ') { '; + } + } + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('format') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { format: '; + if ($isData) { + out += '' + ($schemaValue); + } else { + out += '' + (it.util.toQuotedString($schema)); + } + out += ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should match format "'; + if ($isData) { + out += '\' + ' + ($schemaValue) + ' + \''; + } else { + out += '' + (it.util.escapeQuotes($schema)); + } + out += '"\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + (it.util.toQuotedString($schema)); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + + +/***/ }), + +/***/ 42173: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_if(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema 
= it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + $it.level++; + var $nextValid = 'valid' + $it.level; + var $thenSch = it.schema['then'], + $elseSch = it.schema['else'], + $thenPresent = $thenSch !== undefined && (it.opts.strictKeywords ? typeof $thenSch == 'object' && Object.keys($thenSch).length > 0 : it.util.schemaHasRules($thenSch, it.RULES.all)), + $elsePresent = $elseSch !== undefined && (it.opts.strictKeywords ? typeof $elseSch == 'object' && Object.keys($elseSch).length > 0 : it.util.schemaHasRules($elseSch, it.RULES.all)), + $currentBaseId = $it.baseId; + if ($thenPresent || $elsePresent) { + var $ifClause; + $it.createErrors = false; + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + out += ' var ' + ($errs) + ' = errors; var ' + ($valid) + ' = true; '; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + $it.createErrors = true; + out += ' errors = ' + ($errs) + '; if (vErrors !== null) { if (' + ($errs) + ') vErrors.length = ' + ($errs) + '; else vErrors = null; } '; + it.compositeRule = $it.compositeRule = $wasComposite; + if ($thenPresent) { + out += ' if (' + ($nextValid) + ') { '; + $it.schema = it.schema['then']; + $it.schemaPath = it.schemaPath + '.then'; + $it.errSchemaPath = it.errSchemaPath + '/then'; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + out += ' ' + ($valid) + ' = ' + ($nextValid) + '; '; + if ($thenPresent && $elsePresent) { + $ifClause = 'ifClause' + $lvl; + out += ' var ' + ($ifClause) + ' = \'then\'; '; + } else { + $ifClause = '\'then\''; + } + out += ' } '; + if ($elsePresent) { + 
out += ' else { '; + } + } else { + out += ' if (!' + ($nextValid) + ') { '; + } + if ($elsePresent) { + $it.schema = it.schema['else']; + $it.schemaPath = it.schemaPath + '.else'; + $it.errSchemaPath = it.errSchemaPath + '/else'; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + out += ' ' + ($valid) + ' = ' + ($nextValid) + '; '; + if ($thenPresent && $elsePresent) { + $ifClause = 'ifClause' + $lvl; + out += ' var ' + ($ifClause) + ' = \'else\'; '; + } else { + $ifClause = '\'else\''; + } + out += ' } '; + } + out += ' if (!' + ($valid) + ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('if') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { failingKeyword: ' + ($ifClause) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should match "\' + ' + ($ifClause) + ' + \'" schema\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError(vErrors); '; + } else { + out += ' validate.errors = vErrors; return false; '; + } + } + out += ' } '; + if ($breakOnError) { + out += ' else { '; + } + } else { + if ($breakOnError) { + out += ' if (true) { '; + } + } + return out; +} + + +/***/ }), + +/***/ 63829: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; + + +//all requires must be explicit because browserify won't work with dynamic requires +module.exports = { + '$ref': __nccwpck_require__(62571), + allOf: __nccwpck_require__(49301), + anyOf: __nccwpck_require__(29490), + 
'$comment': __nccwpck_require__(74587), + const: __nccwpck_require__(36691), + contains: __nccwpck_require__(86603), + dependencies: __nccwpck_require__(62059), + 'enum': __nccwpck_require__(76335), + format: __nccwpck_require__(97952), + 'if': __nccwpck_require__(42173), + items: __nccwpck_require__(99578), + maximum: __nccwpck_require__(68979), + minimum: __nccwpck_require__(68979), + maxItems: __nccwpck_require__(2825), + minItems: __nccwpck_require__(2825), + maxLength: __nccwpck_require__(61810), + minLength: __nccwpck_require__(61810), + maxProperties: __nccwpck_require__(47881), + minProperties: __nccwpck_require__(47881), + multipleOf: __nccwpck_require__(84274), + not: __nccwpck_require__(4266), + oneOf: __nccwpck_require__(88356), + pattern: __nccwpck_require__(12451), + properties: __nccwpck_require__(30548), + propertyNames: __nccwpck_require__(75472), + required: __nccwpck_require__(797), + uniqueItems: __nccwpck_require__(53526), + validate: __nccwpck_require__(17302) +}; + + +/***/ }), + +/***/ 99578: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_items(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $idx = 'i' + $lvl, + $dataNxt = $it.dataLevel = it.dataLevel + 1, + $nextData = 'data' + $dataNxt, + $currentBaseId = it.baseId; + out += 'var ' + ($errs) + ' = errors;var ' + ($valid) + ';'; + if (Array.isArray($schema)) { + var $additionalItems = it.schema.additionalItems; + if ($additionalItems === false) { + out += ' ' + ($valid) + ' = ' + ($data) + 
'.length <= ' + ($schema.length) + '; '; + var $currErrSchemaPath = $errSchemaPath; + $errSchemaPath = it.errSchemaPath + '/additionalItems'; + out += ' if (!' + ($valid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('additionalItems') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { limit: ' + ($schema.length) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT have more than ' + ($schema.length) + ' items\' '; + } + if (it.opts.verbose) { + out += ' , schema: false , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } '; + $errSchemaPath = $currErrSchemaPath; + if ($breakOnError) { + $closingBraces += '}'; + out += ' else { '; + } + } + var arr1 = $schema; + if (arr1) { + var $sch, $i = -1, + l1 = arr1.length - 1; + while ($i < l1) { + $sch = arr1[$i += 1]; + if ((it.opts.strictKeywords ? 
typeof $sch == 'object' && Object.keys($sch).length > 0 : it.util.schemaHasRules($sch, it.RULES.all))) { + out += ' ' + ($nextValid) + ' = true; if (' + ($data) + '.length > ' + ($i) + ') { '; + var $passData = $data + '[' + $i + ']'; + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + $it.errorPath = it.util.getPathExpr(it.errorPath, $i, it.opts.jsonPointers, true); + $it.dataPathArr[$dataNxt] = $i; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + out += ' } '; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } + } + if (typeof $additionalItems == 'object' && (it.opts.strictKeywords ? typeof $additionalItems == 'object' && Object.keys($additionalItems).length > 0 : it.util.schemaHasRules($additionalItems, it.RULES.all))) { + $it.schema = $additionalItems; + $it.schemaPath = it.schemaPath + '.additionalItems'; + $it.errSchemaPath = it.errSchemaPath + '/additionalItems'; + out += ' ' + ($nextValid) + ' = true; if (' + ($data) + '.length > ' + ($schema.length) + ') { for (var ' + ($idx) + ' = ' + ($schema.length) + '; ' + ($idx) + ' < ' + ($data) + '.length; ' + ($idx) + '++) { '; + $it.errorPath = it.util.getPathExpr(it.errorPath, $idx, it.opts.jsonPointers, true); + var $passData = $data + '[' + $idx + ']'; + $it.dataPathArr[$dataNxt] = $idx; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + if ($breakOnError) { + out += ' if (!' 
+ ($nextValid) + ') break; '; + } + out += ' } } '; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } else if ((it.opts.strictKeywords ? typeof $schema == 'object' && Object.keys($schema).length > 0 : it.util.schemaHasRules($schema, it.RULES.all))) { + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + out += ' for (var ' + ($idx) + ' = ' + (0) + '; ' + ($idx) + ' < ' + ($data) + '.length; ' + ($idx) + '++) { '; + $it.errorPath = it.util.getPathExpr(it.errorPath, $idx, it.opts.jsonPointers, true); + var $passData = $data + '[' + $idx + ']'; + $it.dataPathArr[$dataNxt] = $idx; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + if ($breakOnError) { + out += ' if (!' + ($nextValid) + ') break; '; + } + out += ' }'; + } + if ($breakOnError) { + out += ' ' + ($closingBraces) + ' if (' + ($errs) + ' == errors) {'; + } + return out; +} + + +/***/ }), + +/***/ 84274: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_multipleOf(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + if (!($isData || typeof $schema == 'number')) { + throw new Error($keyword + ' must be 
number'); + } + out += 'var division' + ($lvl) + ';if ('; + if ($isData) { + out += ' ' + ($schemaValue) + ' !== undefined && ( typeof ' + ($schemaValue) + ' != \'number\' || '; + } + out += ' (division' + ($lvl) + ' = ' + ($data) + ' / ' + ($schemaValue) + ', '; + if (it.opts.multipleOfPrecision) { + out += ' Math.abs(Math.round(division' + ($lvl) + ') - division' + ($lvl) + ') > 1e-' + (it.opts.multipleOfPrecision) + ' '; + } else { + out += ' division' + ($lvl) + ' !== parseInt(division' + ($lvl) + ') '; + } + out += ' ) '; + if ($isData) { + out += ' ) '; + } + out += ' ) { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('multipleOf') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { multipleOf: ' + ($schemaValue) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be multiple of '; + if ($isData) { + out += '\' + ' + ($schemaValue); + } else { + out += '' + ($schemaValue) + '\''; + } + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += '} '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + + +/***/ }), + +/***/ 4266: +/***/ ((module) => { + +"use strict"; + 
+module.exports = function generate_not(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + $it.level++; + var $nextValid = 'valid' + $it.level; + if ((it.opts.strictKeywords ? typeof $schema == 'object' && Object.keys($schema).length > 0 : it.util.schemaHasRules($schema, it.RULES.all))) { + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + out += ' var ' + ($errs) + ' = errors; '; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + $it.createErrors = false; + var $allErrorsOption; + if ($it.opts.allErrors) { + $allErrorsOption = $it.opts.allErrors; + $it.opts.allErrors = false; + } + out += ' ' + (it.validate($it)) + ' '; + $it.createErrors = true; + if ($allErrorsOption) $it.opts.allErrors = $allErrorsOption; + it.compositeRule = $it.compositeRule = $wasComposite; + out += ' if (' + ($nextValid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('not') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT be valid\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if 
(it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else { errors = ' + ($errs) + '; if (vErrors !== null) { if (' + ($errs) + ') vErrors.length = ' + ($errs) + '; else vErrors = null; } '; + if (it.opts.allErrors) { + out += ' } '; + } + } else { + out += ' var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('not') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT be valid\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + if ($breakOnError) { + out += ' if (false) { '; + } + } + return out; +} + + +/***/ }), + +/***/ 88356: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_oneOf(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $currentBaseId = $it.baseId, + $prevValid = 'prevValid' + $lvl, + $passingSchemas = 'passingSchemas' + $lvl; + out += 'var ' + ($errs) + ' 
= errors , ' + ($prevValid) + ' = false , ' + ($valid) + ' = false , ' + ($passingSchemas) + ' = null; '; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + var arr1 = $schema; + if (arr1) { + var $sch, $i = -1, + l1 = arr1.length - 1; + while ($i < l1) { + $sch = arr1[$i += 1]; + if ((it.opts.strictKeywords ? typeof $sch == 'object' && Object.keys($sch).length > 0 : it.util.schemaHasRules($sch, it.RULES.all))) { + $it.schema = $sch; + $it.schemaPath = $schemaPath + '[' + $i + ']'; + $it.errSchemaPath = $errSchemaPath + '/' + $i; + out += ' ' + (it.validate($it)) + ' '; + $it.baseId = $currentBaseId; + } else { + out += ' var ' + ($nextValid) + ' = true; '; + } + if ($i) { + out += ' if (' + ($nextValid) + ' && ' + ($prevValid) + ') { ' + ($valid) + ' = false; ' + ($passingSchemas) + ' = [' + ($passingSchemas) + ', ' + ($i) + ']; } else { '; + $closingBraces += '}'; + } + out += ' if (' + ($nextValid) + ') { ' + ($valid) + ' = ' + ($prevValid) + ' = true; ' + ($passingSchemas) + ' = ' + ($i) + '; }'; + } + } + it.compositeRule = $it.compositeRule = $wasComposite; + out += '' + ($closingBraces) + 'if (!' 
+ ($valid) + ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('oneOf') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { passingSchemas: ' + ($passingSchemas) + ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should match exactly one schema in oneOf\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError(vErrors); '; + } else { + out += ' validate.errors = vErrors; return false; '; + } + } + out += '} else { errors = ' + ($errs) + '; if (vErrors !== null) { if (' + ($errs) + ') vErrors.length = ' + ($errs) + '; else vErrors = null; }'; + if (it.opts.allErrors) { + out += ' } '; + } + return out; +} + + +/***/ }), + +/***/ 12451: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_pattern(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $regexp = $isData ? 
'(new RegExp(' + $schemaValue + '))' : it.usePattern($schema); + out += 'if ( '; + if ($isData) { + out += ' (' + ($schemaValue) + ' !== undefined && typeof ' + ($schemaValue) + ' != \'string\') || '; + } + out += ' !' + ($regexp) + '.test(' + ($data) + ') ) { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('pattern') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { pattern: '; + if ($isData) { + out += '' + ($schemaValue); + } else { + out += '' + (it.util.toQuotedString($schema)); + } + out += ' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should match pattern "'; + if ($isData) { + out += '\' + ' + ($schemaValue) + ' + \''; + } else { + out += '' + (it.util.escapeQuotes($schema)); + } + out += '"\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + (it.util.toQuotedString($schema)); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += '} '; + if ($breakOnError) { + out += ' else { '; + } + return out; +} + + +/***/ }), + +/***/ 30548: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_properties(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = 
it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + var $key = 'key' + $lvl, + $idx = 'idx' + $lvl, + $dataNxt = $it.dataLevel = it.dataLevel + 1, + $nextData = 'data' + $dataNxt, + $dataProperties = 'dataProperties' + $lvl; + var $schemaKeys = Object.keys($schema || {}).filter(notProto), + $pProperties = it.schema.patternProperties || {}, + $pPropertyKeys = Object.keys($pProperties).filter(notProto), + $aProperties = it.schema.additionalProperties, + $someProperties = $schemaKeys.length || $pPropertyKeys.length, + $noAdditional = $aProperties === false, + $additionalIsSchema = typeof $aProperties == 'object' && Object.keys($aProperties).length, + $removeAdditional = it.opts.removeAdditional, + $checkAdditional = $noAdditional || $additionalIsSchema || $removeAdditional, + $ownProperties = it.opts.ownProperties, + $currentBaseId = it.baseId; + var $required = it.schema.required; + if ($required && !(it.opts.$data && $required.$data) && $required.length < it.opts.loopRequired) { + var $requiredHash = it.util.toHash($required); + } + + function notProto(p) { + return p !== '__proto__'; + } + out += 'var ' + ($errs) + ' = errors;var ' + ($nextValid) + ' = true;'; + if ($ownProperties) { + out += ' var ' + ($dataProperties) + ' = undefined;'; + } + if ($checkAdditional) { + if ($ownProperties) { + out += ' ' + ($dataProperties) + ' = ' + ($dataProperties) + ' || Object.keys(' + ($data) + '); for (var ' + ($idx) + '=0; ' + ($idx) + '<' + ($dataProperties) + '.length; ' + ($idx) + '++) { var ' + ($key) + ' = ' + ($dataProperties) + '[' + ($idx) + ']; '; + } else { + out += ' for (var ' + ($key) + ' in ' + ($data) + ') { '; + } + if ($someProperties) { + 
out += ' var isAdditional' + ($lvl) + ' = !(false '; + if ($schemaKeys.length) { + if ($schemaKeys.length > 8) { + out += ' || validate.schema' + ($schemaPath) + '.hasOwnProperty(' + ($key) + ') '; + } else { + var arr1 = $schemaKeys; + if (arr1) { + var $propertyKey, i1 = -1, + l1 = arr1.length - 1; + while (i1 < l1) { + $propertyKey = arr1[i1 += 1]; + out += ' || ' + ($key) + ' == ' + (it.util.toQuotedString($propertyKey)) + ' '; + } + } + } + } + if ($pPropertyKeys.length) { + var arr2 = $pPropertyKeys; + if (arr2) { + var $pProperty, $i = -1, + l2 = arr2.length - 1; + while ($i < l2) { + $pProperty = arr2[$i += 1]; + out += ' || ' + (it.usePattern($pProperty)) + '.test(' + ($key) + ') '; + } + } + } + out += ' ); if (isAdditional' + ($lvl) + ') { '; + } + if ($removeAdditional == 'all') { + out += ' delete ' + ($data) + '[' + ($key) + ']; '; + } else { + var $currentErrorPath = it.errorPath; + var $additionalProperty = '\' + ' + $key + ' + \''; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPathExpr(it.errorPath, $key, it.opts.jsonPointers); + } + if ($noAdditional) { + if ($removeAdditional) { + out += ' delete ' + ($data) + '[' + ($key) + ']; '; + } else { + out += ' ' + ($nextValid) + ' = false; '; + var $currErrSchemaPath = $errSchemaPath; + $errSchemaPath = it.errSchemaPath + '/additionalProperties'; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('additionalProperties') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { additionalProperty: \'' + ($additionalProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is an invalid additional property'; + } else { + out += 'should NOT have additional properties'; + } + out += '\' '; + } + if (it.opts.verbose) { + 
out += ' , schema: false , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + $errSchemaPath = $currErrSchemaPath; + if ($breakOnError) { + out += ' break; '; + } + } + } else if ($additionalIsSchema) { + if ($removeAdditional == 'failing') { + out += ' var ' + ($errs) + ' = errors; '; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + $it.schema = $aProperties; + $it.schemaPath = it.schemaPath + '.additionalProperties'; + $it.errSchemaPath = it.errSchemaPath + '/additionalProperties'; + $it.errorPath = it.opts._errorDataPathProperty ? it.errorPath : it.util.getPathExpr(it.errorPath, $key, it.opts.jsonPointers); + var $passData = $data + '[' + $key + ']'; + $it.dataPathArr[$dataNxt] = $key; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + out += ' if (!' + ($nextValid) + ') { errors = ' + ($errs) + '; if (validate.errors !== null) { if (errors) validate.errors.length = errors; else validate.errors = null; } delete ' + ($data) + '[' + ($key) + ']; } '; + it.compositeRule = $it.compositeRule = $wasComposite; + } else { + $it.schema = $aProperties; + $it.schemaPath = it.schemaPath + '.additionalProperties'; + $it.errSchemaPath = it.errSchemaPath + '/additionalProperties'; + $it.errorPath = it.opts._errorDataPathProperty ? 
it.errorPath : it.util.getPathExpr(it.errorPath, $key, it.opts.jsonPointers); + var $passData = $data + '[' + $key + ']'; + $it.dataPathArr[$dataNxt] = $key; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + if ($breakOnError) { + out += ' if (!' + ($nextValid) + ') break; '; + } + } + } + it.errorPath = $currentErrorPath; + } + if ($someProperties) { + out += ' } '; + } + out += ' } '; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + var $useDefaults = it.opts.useDefaults && !it.compositeRule; + if ($schemaKeys.length) { + var arr3 = $schemaKeys; + if (arr3) { + var $propertyKey, i3 = -1, + l3 = arr3.length - 1; + while (i3 < l3) { + $propertyKey = arr3[i3 += 1]; + var $sch = $schema[$propertyKey]; + if ((it.opts.strictKeywords ? 
typeof $sch == 'object' && Object.keys($sch).length > 0 : it.util.schemaHasRules($sch, it.RULES.all))) { + var $prop = it.util.getProperty($propertyKey), + $passData = $data + $prop, + $hasDefault = $useDefaults && $sch.default !== undefined; + $it.schema = $sch; + $it.schemaPath = $schemaPath + $prop; + $it.errSchemaPath = $errSchemaPath + '/' + it.util.escapeFragment($propertyKey); + $it.errorPath = it.util.getPath(it.errorPath, $propertyKey, it.opts.jsonPointers); + $it.dataPathArr[$dataNxt] = it.util.toQuotedString($propertyKey); + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + $code = it.util.varReplace($code, $nextData, $passData); + var $useData = $passData; + } else { + var $useData = $nextData; + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; '; + } + if ($hasDefault) { + out += ' ' + ($code) + ' '; + } else { + if ($requiredHash && $requiredHash[$propertyKey]) { + out += ' if ( ' + ($useData) + ' === undefined '; + if ($ownProperties) { + out += ' || ! 
Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ') { ' + ($nextValid) + ' = false; '; + var $currentErrorPath = it.errorPath, + $currErrSchemaPath = $errSchemaPath, + $missingProperty = it.util.escapeQuotes($propertyKey); + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPath($currentErrorPath, $propertyKey, it.opts.jsonPointers); + } + $errSchemaPath = it.errSchemaPath + '/required'; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + $errSchemaPath = $currErrSchemaPath; + it.errorPath = $currentErrorPath; + out += ' } else { '; + } else { + if ($breakOnError) { + out += ' if ( ' + ($useData) + ' === undefined '; + if ($ownProperties) { + out += ' || ! 
Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ') { ' + ($nextValid) + ' = true; } else { '; + } else { + out += ' if (' + ($useData) + ' !== undefined '; + if ($ownProperties) { + out += ' && Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ' ) { '; + } + } + out += ' ' + ($code) + ' } '; + } + } + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } + } + if ($pPropertyKeys.length) { + var arr4 = $pPropertyKeys; + if (arr4) { + var $pProperty, i4 = -1, + l4 = arr4.length - 1; + while (i4 < l4) { + $pProperty = arr4[i4 += 1]; + var $sch = $pProperties[$pProperty]; + if ((it.opts.strictKeywords ? typeof $sch == 'object' && Object.keys($sch).length > 0 : it.util.schemaHasRules($sch, it.RULES.all))) { + $it.schema = $sch; + $it.schemaPath = it.schemaPath + '.patternProperties' + it.util.getProperty($pProperty); + $it.errSchemaPath = it.errSchemaPath + '/patternProperties/' + it.util.escapeFragment($pProperty); + if ($ownProperties) { + out += ' ' + ($dataProperties) + ' = ' + ($dataProperties) + ' || Object.keys(' + ($data) + '); for (var ' + ($idx) + '=0; ' + ($idx) + '<' + ($dataProperties) + '.length; ' + ($idx) + '++) { var ' + ($key) + ' = ' + ($dataProperties) + '[' + ($idx) + ']; '; + } else { + out += ' for (var ' + ($key) + ' in ' + ($data) + ') { '; + } + out += ' if (' + (it.usePattern($pProperty)) + '.test(' + ($key) + ')) { '; + $it.errorPath = it.util.getPathExpr(it.errorPath, $key, it.opts.jsonPointers); + var $passData = $data + '[' + $key + ']'; + $it.dataPathArr[$dataNxt] = $key; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + if 
($breakOnError) { + out += ' if (!' + ($nextValid) + ') break; '; + } + out += ' } '; + if ($breakOnError) { + out += ' else ' + ($nextValid) + ' = true; '; + } + out += ' } '; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + $closingBraces += '}'; + } + } + } + } + } + if ($breakOnError) { + out += ' ' + ($closingBraces) + ' if (' + ($errs) + ' == errors) {'; + } + return out; +} + + +/***/ }), + +/***/ 75472: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_propertyNames(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $errs = 'errs__' + $lvl; + var $it = it.util.copy(it); + var $closingBraces = ''; + $it.level++; + var $nextValid = 'valid' + $it.level; + out += 'var ' + ($errs) + ' = errors;'; + if ((it.opts.strictKeywords ? 
typeof $schema == 'object' && Object.keys($schema).length > 0 : it.util.schemaHasRules($schema, it.RULES.all))) { + $it.schema = $schema; + $it.schemaPath = $schemaPath; + $it.errSchemaPath = $errSchemaPath; + var $key = 'key' + $lvl, + $idx = 'idx' + $lvl, + $i = 'i' + $lvl, + $invalidName = '\' + ' + $key + ' + \'', + $dataNxt = $it.dataLevel = it.dataLevel + 1, + $nextData = 'data' + $dataNxt, + $dataProperties = 'dataProperties' + $lvl, + $ownProperties = it.opts.ownProperties, + $currentBaseId = it.baseId; + if ($ownProperties) { + out += ' var ' + ($dataProperties) + ' = undefined; '; + } + if ($ownProperties) { + out += ' ' + ($dataProperties) + ' = ' + ($dataProperties) + ' || Object.keys(' + ($data) + '); for (var ' + ($idx) + '=0; ' + ($idx) + '<' + ($dataProperties) + '.length; ' + ($idx) + '++) { var ' + ($key) + ' = ' + ($dataProperties) + '[' + ($idx) + ']; '; + } else { + out += ' for (var ' + ($key) + ' in ' + ($data) + ') { '; + } + out += ' var startErrs' + ($lvl) + ' = errors; '; + var $passData = $key; + var $wasComposite = it.compositeRule; + it.compositeRule = $it.compositeRule = true; + var $code = it.validate($it); + $it.baseId = $currentBaseId; + if (it.util.varOccurences($code, $nextData) < 2) { + out += ' ' + (it.util.varReplace($code, $nextData, $passData)) + ' '; + } else { + out += ' var ' + ($nextData) + ' = ' + ($passData) + '; ' + ($code) + ' '; + } + it.compositeRule = $it.compositeRule = $wasComposite; + out += ' if (!' 
+ ($nextValid) + ') { for (var ' + ($i) + '=startErrs' + ($lvl) + '; ' + ($i) + ' { + +"use strict"; + +module.exports = function generate_ref(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $async, $refCode; + if ($schema == '#' || $schema == '#/') { + if (it.isRoot) { + $async = it.async; + $refCode = 'validate'; + } else { + $async = it.root.schema.$async === true; + $refCode = 'root.refVal[0]'; + } + } else { + var $refVal = it.resolveRef(it.baseId, $schema, it.isRoot); + if ($refVal === undefined) { + var $message = it.MissingRefError.message(it.baseId, $schema); + if (it.opts.missingRefs == 'fail') { + it.logger.error($message); + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('$ref') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { ref: \'' + (it.util.escapeQuotes($schema)) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \'can\\\'t resolve reference ' + (it.util.escapeQuotes($schema)) + '\' '; + } + if (it.opts.verbose) { + out += ' , schema: ' + (it.util.toQuotedString($schema)) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else 
vErrors.push(err); errors++; '; + } + if ($breakOnError) { + out += ' if (false) { '; + } + } else if (it.opts.missingRefs == 'ignore') { + it.logger.warn($message); + if ($breakOnError) { + out += ' if (true) { '; + } + } else { + throw new it.MissingRefError(it.baseId, $schema, $message); + } + } else if ($refVal.inline) { + var $it = it.util.copy(it); + $it.level++; + var $nextValid = 'valid' + $it.level; + $it.schema = $refVal.schema; + $it.schemaPath = ''; + $it.errSchemaPath = $schema; + var $code = it.validate($it).replace(/validate\.schema/g, $refVal.code); + out += ' ' + ($code) + ' '; + if ($breakOnError) { + out += ' if (' + ($nextValid) + ') { '; + } + } else { + $async = $refVal.$async === true || (it.async && $refVal.$async !== false); + $refCode = $refVal.code; + } + } + if ($refCode) { + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; + if (it.opts.passContext) { + out += ' ' + ($refCode) + '.call(this, '; + } else { + out += ' ' + ($refCode) + '( '; + } + out += ' ' + ($data) + ', (dataPath || \'\')'; + if (it.errorPath != '""') { + out += ' + ' + (it.errorPath); + } + var $parentData = $dataLvl ? 'data' + (($dataLvl - 1) || '') : 'parentData', + $parentDataProperty = $dataLvl ? 
it.dataPathArr[$dataLvl] : 'parentDataProperty'; + out += ' , ' + ($parentData) + ' , ' + ($parentDataProperty) + ', rootData) '; + var __callValidate = out; + out = $$outStack.pop(); + if ($async) { + if (!it.async) throw new Error('async schema referenced by sync schema'); + if ($breakOnError) { + out += ' var ' + ($valid) + '; '; + } + out += ' try { await ' + (__callValidate) + '; '; + if ($breakOnError) { + out += ' ' + ($valid) + ' = true; '; + } + out += ' } catch (e) { if (!(e instanceof ValidationError)) throw e; if (vErrors === null) vErrors = e.errors; else vErrors = vErrors.concat(e.errors); errors = vErrors.length; '; + if ($breakOnError) { + out += ' ' + ($valid) + ' = false; '; + } + out += ' } '; + if ($breakOnError) { + out += ' if (' + ($valid) + ') { '; + } + } else { + out += ' if (!' + (__callValidate) + ') { if (vErrors === null) vErrors = ' + ($refCode) + '.errors; else vErrors = vErrors.concat(' + ($refCode) + '.errors); errors = vErrors.length; } '; + if ($breakOnError) { + out += ' else { '; + } + } + } + return out; +} + + +/***/ }), + +/***/ 797: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_required(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + var $vSchema = 'schema' + $lvl; + if (!$isData) { + if ($schema.length < it.opts.loopRequired && it.schema.properties && Object.keys(it.schema.properties).length) { + var 
$required = []; + var arr1 = $schema; + if (arr1) { + var $property, i1 = -1, + l1 = arr1.length - 1; + while (i1 < l1) { + $property = arr1[i1 += 1]; + var $propertySch = it.schema.properties[$property]; + if (!($propertySch && (it.opts.strictKeywords ? typeof $propertySch == 'object' && Object.keys($propertySch).length > 0 : it.util.schemaHasRules($propertySch, it.RULES.all)))) { + $required[$required.length] = $property; + } + } + } + } else { + var $required = $schema; + } + } + if ($isData || $required.length) { + var $currentErrorPath = it.errorPath, + $loopRequired = $isData || $required.length >= it.opts.loopRequired, + $ownProperties = it.opts.ownProperties; + if ($breakOnError) { + out += ' var missing' + ($lvl) + '; '; + if ($loopRequired) { + if (!$isData) { + out += ' var ' + ($vSchema) + ' = validate.schema' + ($schemaPath) + '; '; + } + var $i = 'i' + $lvl, + $propertyPath = 'schema' + $lvl + '[' + $i + ']', + $missingProperty = '\' + ' + $propertyPath + ' + \''; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPathExpr($currentErrorPath, $propertyPath, it.opts.jsonPointers); + } + out += ' var ' + ($valid) + ' = true; '; + if ($isData) { + out += ' if (schema' + ($lvl) + ' === undefined) ' + ($valid) + ' = true; else if (!Array.isArray(schema' + ($lvl) + ')) ' + ($valid) + ' = false; else {'; + } + out += ' for (var ' + ($i) + ' = 0; ' + ($i) + ' < ' + ($vSchema) + '.length; ' + ($i) + '++) { ' + ($valid) + ' = ' + ($data) + '[' + ($vSchema) + '[' + ($i) + ']] !== undefined '; + if ($ownProperties) { + out += ' && Object.prototype.hasOwnProperty.call(' + ($data) + ', ' + ($vSchema) + '[' + ($i) + ']) '; + } + out += '; if (!' + ($valid) + ') break; } '; + if ($isData) { + out += ' } '; + } + out += ' if (!' 
+ ($valid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else { '; + } else { + out += ' if ( '; + var arr2 = $required; + if (arr2) { + var $propertyKey, $i = -1, + l2 = arr2.length - 1; + while ($i < l2) { + $propertyKey = arr2[$i += 1]; + if ($i) { + out += ' || '; + } + var $prop = it.util.getProperty($propertyKey), + $useData = $data + $prop; + out += ' ( ( ' + ($useData) + ' === undefined '; + if ($ownProperties) { + out += ' || ! Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ') && (missing' + ($lvl) + ' = ' + (it.util.toQuotedString(it.opts.jsonPointers ? 
$propertyKey : $prop)) + ') ) '; + } + } + out += ') { '; + var $propertyPath = 'missing' + $lvl, + $missingProperty = '\' + ' + $propertyPath + ' + \''; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.opts.jsonPointers ? it.util.getPathExpr($currentErrorPath, $propertyPath, true) : $currentErrorPath + ' + ' + $propertyPath; + } + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else { '; + } + } else { + if ($loopRequired) { + if (!$isData) { + out += ' var ' + ($vSchema) + ' = validate.schema' + ($schemaPath) + '; '; + } + var $i = 'i' + $lvl, + $propertyPath = 'schema' + $lvl + '[' + $i + ']', + $missingProperty = '\' + ' + $propertyPath + ' + \''; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPathExpr($currentErrorPath, $propertyPath, 
it.opts.jsonPointers); + } + if ($isData) { + out += ' if (' + ($vSchema) + ' && !Array.isArray(' + ($vSchema) + ')) { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } else if (' + ($vSchema) + ' !== undefined) { '; + } + out += ' for (var ' + ($i) + ' = 0; ' + ($i) + ' < ' + ($vSchema) + '.length; ' + ($i) + '++) { if (' + ($data) + '[' + ($vSchema) + '[' + ($i) + ']] === undefined '; + if ($ownProperties) { + out += ' || ! 
Object.prototype.hasOwnProperty.call(' + ($data) + ', ' + ($vSchema) + '[' + ($i) + ']) '; + } + out += ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } } '; + if ($isData) { + out += ' } '; + } + } else { + var arr3 = $required; + if (arr3) { + var $propertyKey, i3 = -1, + l3 = arr3.length - 1; + while (i3 < l3) { + $propertyKey = arr3[i3 += 1]; + var $prop = it.util.getProperty($propertyKey), + $missingProperty = it.util.escapeQuotes($propertyKey), + $useData = $data + $prop; + if (it.opts._errorDataPathProperty) { + it.errorPath = it.util.getPath($currentErrorPath, $propertyKey, it.opts.jsonPointers); + } + out += ' if ( ' + ($useData) + ' === undefined '; + if ($ownProperties) { + out += ' || ! 
Object.prototype.hasOwnProperty.call(' + ($data) + ', \'' + (it.util.escapeQuotes($propertyKey)) + '\') '; + } + out += ') { var err = '; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('required') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { missingProperty: \'' + ($missingProperty) + '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \''; + if (it.opts._errorDataPathProperty) { + out += 'is a required property'; + } else { + out += 'should have required property \\\'' + ($missingProperty) + '\\\''; + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; } '; + } + } + } + } + it.errorPath = $currentErrorPath; + } else if ($breakOnError) { + out += ' if (true) {'; + } + return out; +} + + +/***/ }), + +/***/ 53526: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_uniqueItems(it, $keyword, $ruleType) { + var out = ' '; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + var $isData = it.opts.$data && $schema && $schema.$data, + $schemaValue; + if ($isData) { + out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; '; + $schemaValue = 'schema' + $lvl; + } else { + $schemaValue = $schema; + } + if (($schema || $isData) && it.opts.uniqueItems !== false) { + if ($isData) { + out += ' var ' + ($valid) + '; 
if (' + ($schemaValue) + ' === false || ' + ($schemaValue) + ' === undefined) ' + ($valid) + ' = true; else if (typeof ' + ($schemaValue) + ' != \'boolean\') ' + ($valid) + ' = false; else { '; + } + out += ' var i = ' + ($data) + '.length , ' + ($valid) + ' = true , j; if (i > 1) { '; + var $itemType = it.schema.items && it.schema.items.type, + $typeIsArray = Array.isArray($itemType); + if (!$itemType || $itemType == 'object' || $itemType == 'array' || ($typeIsArray && ($itemType.indexOf('object') >= 0 || $itemType.indexOf('array') >= 0))) { + out += ' outer: for (;i--;) { for (j = i; j--;) { if (equal(' + ($data) + '[i], ' + ($data) + '[j])) { ' + ($valid) + ' = false; break outer; } } } '; + } else { + out += ' var itemIndices = {}, item; for (;i--;) { var item = ' + ($data) + '[i]; '; + var $method = 'checkDataType' + ($typeIsArray ? 's' : ''); + out += ' if (' + (it.util[$method]($itemType, 'item', it.opts.strictNumbers, true)) + ') continue; '; + if ($typeIsArray) { + out += ' if (typeof item == \'string\') item = \'"\' + item; '; + } + out += ' if (typeof itemIndices[item] == \'number\') { ' + ($valid) + ' = false; j = itemIndices[item]; break; } itemIndices[item] = i; } '; + } + out += ' } '; + if ($isData) { + out += ' } '; + } + out += ' if (!' 
+ ($valid) + ') { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ('uniqueItems') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { i: i, j: j } '; + if (it.opts.messages !== false) { + out += ' , message: \'should NOT have duplicate items (items ## \' + j + \' and \' + i + \' are identical)\' '; + } + if (it.opts.verbose) { + out += ' , schema: '; + if ($isData) { + out += 'validate.schema' + ($schemaPath); + } else { + out += '' + ($schema); + } + out += ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } '; + if ($breakOnError) { + out += ' else { '; + } + } else { + if ($breakOnError) { + out += ' if (true) { '; + } + } + return out; +} + + +/***/ }), + +/***/ 17302: +/***/ ((module) => { + +"use strict"; + +module.exports = function generate_validate(it, $keyword, $ruleType) { + var out = ''; + var $async = it.schema.$async === true, + $refKeywords = it.util.schemaHasRulesExcept(it.schema, it.RULES.all, '$ref'), + $id = it.self._getId(it.schema); + if (it.opts.strictKeywords) { + var $unknownKwd = it.util.schemaUnknownRules(it.schema, it.RULES.keywords); + if ($unknownKwd) { + var $keywordsMsg = 'unknown keyword: ' + $unknownKwd; + if (it.opts.strictKeywords === 'log') it.logger.warn($keywordsMsg); + else throw new Error($keywordsMsg); + } + } + if (it.isTop) { + out 
+= ' var validate = '; + if ($async) { + it.async = true; + out += 'async '; + } + out += 'function(data, dataPath, parentData, parentDataProperty, rootData) { \'use strict\'; '; + if ($id && (it.opts.sourceCode || it.opts.processCode)) { + out += ' ' + ('/\*# sourceURL=' + $id + ' */') + ' '; + } + } + if (typeof it.schema == 'boolean' || !($refKeywords || it.schema.$ref)) { + var $keyword = 'false schema'; + var $lvl = it.level; + var $dataLvl = it.dataLevel; + var $schema = it.schema[$keyword]; + var $schemaPath = it.schemaPath + it.util.getProperty($keyword); + var $errSchemaPath = it.errSchemaPath + '/' + $keyword; + var $breakOnError = !it.opts.allErrors; + var $errorKeyword; + var $data = 'data' + ($dataLvl || ''); + var $valid = 'valid' + $lvl; + if (it.schema === false) { + if (it.isTop) { + $breakOnError = true; + } else { + out += ' var ' + ($valid) + ' = false; '; + } + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || 'false schema') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: {} '; + if (it.opts.messages !== false) { + out += ' , message: \'boolean schema is false\' '; + } + if (it.opts.verbose) { + out += ' , schema: false , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + } else { + if (it.isTop) { + if ($async) { + out += ' return data; '; + } else { + 
out += ' validate.errors = null; return true; '; + } + } else { + out += ' var ' + ($valid) + ' = true; '; + } + } + if (it.isTop) { + out += ' }; return validate; '; + } + return out; + } + if (it.isTop) { + var $top = it.isTop, + $lvl = it.level = 0, + $dataLvl = it.dataLevel = 0, + $data = 'data'; + it.rootId = it.resolve.fullPath(it.self._getId(it.root.schema)); + it.baseId = it.baseId || it.rootId; + delete it.isTop; + it.dataPathArr = [undefined]; + if (it.schema.default !== undefined && it.opts.useDefaults && it.opts.strictDefaults) { + var $defaultMsg = 'default is ignored in the schema root'; + if (it.opts.strictDefaults === 'log') it.logger.warn($defaultMsg); + else throw new Error($defaultMsg); + } + out += ' var vErrors = null; '; + out += ' var errors = 0; '; + out += ' if (rootData === undefined) rootData = data; '; + } else { + var $lvl = it.level, + $dataLvl = it.dataLevel, + $data = 'data' + ($dataLvl || ''); + if ($id) it.baseId = it.resolve.url(it.baseId, $id); + if ($async && !it.async) throw new Error('async schema in sync schema'); + out += ' var errs_' + ($lvl) + ' = errors;'; + } + var $valid = 'valid' + $lvl, + $breakOnError = !it.opts.allErrors, + $closingBraces1 = '', + $closingBraces2 = ''; + var $errorKeyword; + var $typeSchema = it.schema.type, + $typeIsArray = Array.isArray($typeSchema); + if ($typeSchema && it.opts.nullable && it.schema.nullable === true) { + if ($typeIsArray) { + if ($typeSchema.indexOf('null') == -1) $typeSchema = $typeSchema.concat('null'); + } else if ($typeSchema != 'null') { + $typeSchema = [$typeSchema, 'null']; + $typeIsArray = true; + } + } + if ($typeIsArray && $typeSchema.length == 1) { + $typeSchema = $typeSchema[0]; + $typeIsArray = false; + } + if (it.schema.$ref && $refKeywords) { + if (it.opts.extendRefs == 'fail') { + throw new Error('$ref: validation keywords used in schema at path "' + it.errSchemaPath + '" (see option extendRefs)'); + } else if (it.opts.extendRefs !== true) { + $refKeywords = 
false; + it.logger.warn('$ref: keywords ignored in schema at path "' + it.errSchemaPath + '"'); + } + } + if (it.schema.$comment && it.opts.$comment) { + out += ' ' + (it.RULES.all.$comment.code(it, '$comment')); + } + if ($typeSchema) { + if (it.opts.coerceTypes) { + var $coerceToTypes = it.util.coerceToTypes(it.opts.coerceTypes, $typeSchema); + } + var $rulesGroup = it.RULES.types[$typeSchema]; + if ($coerceToTypes || $typeIsArray || $rulesGroup === true || ($rulesGroup && !$shouldUseGroup($rulesGroup))) { + var $schemaPath = it.schemaPath + '.type', + $errSchemaPath = it.errSchemaPath + '/type'; + var $schemaPath = it.schemaPath + '.type', + $errSchemaPath = it.errSchemaPath + '/type', + $method = $typeIsArray ? 'checkDataTypes' : 'checkDataType'; + out += ' if (' + (it.util[$method]($typeSchema, $data, it.opts.strictNumbers, true)) + ') { '; + if ($coerceToTypes) { + var $dataType = 'dataType' + $lvl, + $coerced = 'coerced' + $lvl; + out += ' var ' + ($dataType) + ' = typeof ' + ($data) + '; '; + if (it.opts.coerceTypes == 'array') { + out += ' if (' + ($dataType) + ' == \'object\' && Array.isArray(' + ($data) + ')) ' + ($dataType) + ' = \'array\'; '; + } + out += ' var ' + ($coerced) + ' = undefined; '; + var $bracesCoercion = ''; + var arr1 = $coerceToTypes; + if (arr1) { + var $type, $i = -1, + l1 = arr1.length - 1; + while ($i < l1) { + $type = arr1[$i += 1]; + if ($i) { + out += ' if (' + ($coerced) + ' === undefined) { '; + $bracesCoercion += '}'; + } + if (it.opts.coerceTypes == 'array' && $type != 'array') { + out += ' if (' + ($dataType) + ' == \'array\' && ' + ($data) + '.length == 1) { ' + ($coerced) + ' = ' + ($data) + ' = ' + ($data) + '[0]; ' + ($dataType) + ' = typeof ' + ($data) + '; } '; + } + if ($type == 'string') { + out += ' if (' + ($dataType) + ' == \'number\' || ' + ($dataType) + ' == \'boolean\') ' + ($coerced) + ' = \'\' + ' + ($data) + '; else if (' + ($data) + ' === null) ' + ($coerced) + ' = \'\'; '; + } else if ($type == 'number' 
|| $type == 'integer') { + out += ' if (' + ($dataType) + ' == \'boolean\' || ' + ($data) + ' === null || (' + ($dataType) + ' == \'string\' && ' + ($data) + ' && ' + ($data) + ' == +' + ($data) + ' '; + if ($type == 'integer') { + out += ' && !(' + ($data) + ' % 1)'; + } + out += ')) ' + ($coerced) + ' = +' + ($data) + '; '; + } else if ($type == 'boolean') { + out += ' if (' + ($data) + ' === \'false\' || ' + ($data) + ' === 0 || ' + ($data) + ' === null) ' + ($coerced) + ' = false; else if (' + ($data) + ' === \'true\' || ' + ($data) + ' === 1) ' + ($coerced) + ' = true; '; + } else if ($type == 'null') { + out += ' if (' + ($data) + ' === \'\' || ' + ($data) + ' === 0 || ' + ($data) + ' === false) ' + ($coerced) + ' = null; '; + } else if (it.opts.coerceTypes == 'array' && $type == 'array') { + out += ' if (' + ($dataType) + ' == \'string\' || ' + ($dataType) + ' == \'number\' || ' + ($dataType) + ' == \'boolean\' || ' + ($data) + ' == null) ' + ($coerced) + ' = [' + ($data) + ']; '; + } + } + } + out += ' ' + ($bracesCoercion) + ' if (' + ($coerced) + ' === undefined) { '; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || 'type') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { type: \''; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be '; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = 
$$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } else { '; + var $parentData = $dataLvl ? 'data' + (($dataLvl - 1) || '') : 'parentData', + $parentDataProperty = $dataLvl ? it.dataPathArr[$dataLvl] : 'parentDataProperty'; + out += ' ' + ($data) + ' = ' + ($coerced) + '; '; + if (!$dataLvl) { + out += 'if (' + ($parentData) + ' !== undefined)'; + } + out += ' ' + ($parentData) + '[' + ($parentDataProperty) + '] = ' + ($coerced) + '; } '; + } else { + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || 'type') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { type: \''; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be '; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if 
(vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + } + out += ' } '; + } + } + if (it.schema.$ref && !$refKeywords) { + out += ' ' + (it.RULES.all.$ref.code(it, '$ref')) + ' '; + if ($breakOnError) { + out += ' } if (errors === '; + if ($top) { + out += '0'; + } else { + out += 'errs_' + ($lvl); + } + out += ') { '; + $closingBraces2 += '}'; + } + } else { + var arr2 = it.RULES; + if (arr2) { + var $rulesGroup, i2 = -1, + l2 = arr2.length - 1; + while (i2 < l2) { + $rulesGroup = arr2[i2 += 1]; + if ($shouldUseGroup($rulesGroup)) { + if ($rulesGroup.type) { + out += ' if (' + (it.util.checkDataType($rulesGroup.type, $data, it.opts.strictNumbers)) + ') { '; + } + if (it.opts.useDefaults) { + if ($rulesGroup.type == 'object' && it.schema.properties) { + var $schema = it.schema.properties, + $schemaKeys = Object.keys($schema); + var arr3 = $schemaKeys; + if (arr3) { + var $propertyKey, i3 = -1, + l3 = arr3.length - 1; + while (i3 < l3) { + $propertyKey = arr3[i3 += 1]; + var $sch = $schema[$propertyKey]; + if ($sch.default !== undefined) { + var $passData = $data + it.util.getProperty($propertyKey); + if (it.compositeRule) { + if (it.opts.strictDefaults) { + var $defaultMsg = 'default is ignored for: ' + $passData; + if (it.opts.strictDefaults === 'log') it.logger.warn($defaultMsg); + else throw new Error($defaultMsg); + } + } else { + out += ' if (' + ($passData) + ' === undefined '; + if (it.opts.useDefaults == 'empty') { + out += ' || ' + ($passData) + ' === null || ' + ($passData) + ' === \'\' '; + } + out += ' ) ' + ($passData) + ' = '; + if (it.opts.useDefaults == 'shared') { + out += ' ' + (it.useDefault($sch.default)) + ' '; + } else { + out += ' ' + (JSON.stringify($sch.default)) + ' '; + } + out += '; '; + } + } + } + } + } else if ($rulesGroup.type == 'array' && Array.isArray(it.schema.items)) { + var arr4 = it.schema.items; + if (arr4) { + var $sch, $i = -1, + l4 = arr4.length - 1; + while ($i < l4) { + $sch = arr4[$i += 1]; + if 
($sch.default !== undefined) { + var $passData = $data + '[' + $i + ']'; + if (it.compositeRule) { + if (it.opts.strictDefaults) { + var $defaultMsg = 'default is ignored for: ' + $passData; + if (it.opts.strictDefaults === 'log') it.logger.warn($defaultMsg); + else throw new Error($defaultMsg); + } + } else { + out += ' if (' + ($passData) + ' === undefined '; + if (it.opts.useDefaults == 'empty') { + out += ' || ' + ($passData) + ' === null || ' + ($passData) + ' === \'\' '; + } + out += ' ) ' + ($passData) + ' = '; + if (it.opts.useDefaults == 'shared') { + out += ' ' + (it.useDefault($sch.default)) + ' '; + } else { + out += ' ' + (JSON.stringify($sch.default)) + ' '; + } + out += '; '; + } + } + } + } + } + } + var arr5 = $rulesGroup.rules; + if (arr5) { + var $rule, i5 = -1, + l5 = arr5.length - 1; + while (i5 < l5) { + $rule = arr5[i5 += 1]; + if ($shouldUseRule($rule)) { + var $code = $rule.code(it, $rule.keyword, $rulesGroup.type); + if ($code) { + out += ' ' + ($code) + ' '; + if ($breakOnError) { + $closingBraces1 += '}'; + } + } + } + } + } + if ($breakOnError) { + out += ' ' + ($closingBraces1) + ' '; + $closingBraces1 = ''; + } + if ($rulesGroup.type) { + out += ' } '; + if ($typeSchema && $typeSchema === $rulesGroup.type && !$coerceToTypes) { + out += ' else { '; + var $schemaPath = it.schemaPath + '.type', + $errSchemaPath = it.errSchemaPath + '/type'; + var $$outStack = $$outStack || []; + $$outStack.push(out); + out = ''; /* istanbul ignore else */ + if (it.createErrors !== false) { + out += ' { keyword: \'' + ($errorKeyword || 'type') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { type: \''; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + out += '' + ($typeSchema); + } + out += '\' } '; + if (it.opts.messages !== false) { + out += ' , message: \'should be '; + if ($typeIsArray) { + out += '' + ($typeSchema.join(",")); + } else { + 
out += '' + ($typeSchema); + } + out += '\' '; + } + if (it.opts.verbose) { + out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' '; + } + out += ' } '; + } else { + out += ' {} '; + } + var __err = out; + out = $$outStack.pop(); + if (!it.compositeRule && $breakOnError) { + /* istanbul ignore if */ + if (it.async) { + out += ' throw new ValidationError([' + (__err) + ']); '; + } else { + out += ' validate.errors = [' + (__err) + ']; return false; '; + } + } else { + out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; '; + } + out += ' } '; + } + } + if ($breakOnError) { + out += ' if (errors === '; + if ($top) { + out += '0'; + } else { + out += 'errs_' + ($lvl); + } + out += ') { '; + $closingBraces2 += '}'; + } + } + } + } + } + if ($breakOnError) { + out += ' ' + ($closingBraces2) + ' '; + } + if ($top) { + if ($async) { + out += ' if (errors === 0) return data; '; + out += ' else throw new ValidationError(vErrors); '; + } else { + out += ' validate.errors = vErrors; '; + out += ' return errors === 0; '; + } + out += ' }; return validate;'; + } else { + out += ' var ' + ($valid) + ' = errors === errs_' + ($lvl) + ';'; + } + + function $shouldUseGroup($rulesGroup) { + var rules = $rulesGroup.rules; + for (var i = 0; i < rules.length; i++) + if ($shouldUseRule(rules[i])) return true; + } + + function $shouldUseRule($rule) { + return it.schema[$rule.keyword] !== undefined || ($rule.implements && $ruleImplementsSomeKeyword($rule)); + } + + function $ruleImplementsSomeKeyword($rule) { + var impl = $rule.implements; + for (var i = 0; i < impl.length; i++) + if (it.schema[impl[i]] !== undefined) return true; + } + return out; +} + + +/***/ }), + +/***/ 77824: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; + + +var IDENTIFIER = /^[a-z_$][a-z0-9_$-]*$/i; +var customRuleCode = 
__nccwpck_require__(26773); +var definitionSchema = __nccwpck_require__(74112); + +module.exports = { + add: addKeyword, + get: getKeyword, + remove: removeKeyword, + validate: validateKeyword +}; + + +/** + * Define custom keyword + * @this Ajv + * @param {String} keyword custom keyword, should be unique (including different from all standard, custom and macro keywords). + * @param {Object} definition keyword definition object with properties `type` (type(s) which the keyword applies to), `validate` or `compile`. + * @return {Ajv} this for method chaining + */ +function addKeyword(keyword, definition) { + /* jshint validthis: true */ + /* eslint no-shadow: 0 */ + var RULES = this.RULES; + if (RULES.keywords[keyword]) + throw new Error('Keyword ' + keyword + ' is already defined'); + + if (!IDENTIFIER.test(keyword)) + throw new Error('Keyword ' + keyword + ' is not a valid identifier'); + + if (definition) { + this.validateKeyword(definition, true); + + var dataType = definition.type; + if (Array.isArray(dataType)) { + for (var i=0; i { + +/** + * archiver-utils + * + * Copyright (c) 2012-2014 Chris Talkington, contributors. + * Licensed under the MIT license. + * https://github.com/archiverjs/node-archiver/blob/master/LICENSE-MIT + */ +var fs = __nccwpck_require__(60015); +var path = __nccwpck_require__(71017); + +var flatten = __nccwpck_require__(692); +var difference = __nccwpck_require__(82385); +var union = __nccwpck_require__(85701); +var isPlainObject = __nccwpck_require__(44201); + +var glob = __nccwpck_require__(74614); + +var file = module.exports = {}; + +var pathSeparatorRe = /[\/\\]/g; + +// Process specified wildcard glob patterns or filenames against a +// callback, excluding and uniquing files in the result set. +var processPatterns = function(patterns, fn) { + // Filepaths to return. + var result = []; + // Iterate over flattened patterns array. + flatten(patterns).forEach(function(pattern) { + // If the first character is ! 
it should be omitted + var exclusion = pattern.indexOf('!') === 0; + // If the pattern is an exclusion, remove the ! + if (exclusion) { pattern = pattern.slice(1); } + // Find all matching files for this pattern. + var matches = fn(pattern); + if (exclusion) { + // If an exclusion, remove matching files. + result = difference(result, matches); + } else { + // Otherwise add matching files. + result = union(result, matches); + } + }); + return result; +}; + +// True if the file path exists. +file.exists = function() { + var filepath = path.join.apply(path, arguments); + return fs.existsSync(filepath); +}; + +// Return an array of all file paths that match the given wildcard patterns. +file.expand = function(...args) { + // If the first argument is an options object, save those options to pass + // into the File.prototype.glob.sync method. + var options = isPlainObject(args[0]) ? args.shift() : {}; + // Use the first argument if it's an Array, otherwise convert the arguments + // object to an array and use that. + var patterns = Array.isArray(args[0]) ? args[0] : args; + // Return empty set if there are no patterns or filepaths. + if (patterns.length === 0) { return []; } + // Return all matching filepaths. + var matches = processPatterns(patterns, function(pattern) { + // Find all matching files for this pattern. + return glob.sync(pattern, options); + }); + // Filter result set? + if (options.filter) { + matches = matches.filter(function(filepath) { + filepath = path.join(options.cwd || '', filepath); + try { + if (typeof options.filter === 'function') { + return options.filter(filepath); + } else { + // If the file is of the right type and exists, this should work. + return fs.statSync(filepath)[options.filter](); + } + } catch(e) { + // Otherwise, it's probably not the right type. + return false; + } + }); + } + return matches; +}; + +// Build a multi task "files" object dynamically. 
+file.expandMapping = function(patterns, destBase, options) { + options = Object.assign({ + rename: function(destBase, destPath) { + return path.join(destBase || '', destPath); + } + }, options); + var files = []; + var fileByDest = {}; + // Find all files matching pattern, using passed-in options. + file.expand(options, patterns).forEach(function(src) { + var destPath = src; + // Flatten? + if (options.flatten) { + destPath = path.basename(destPath); + } + // Change the extension? + if (options.ext) { + destPath = destPath.replace(/(\.[^\/]*)?$/, options.ext); + } + // Generate destination filename. + var dest = options.rename(destBase, destPath, options); + // Prepend cwd to src path if necessary. + if (options.cwd) { src = path.join(options.cwd, src); } + // Normalize filepaths to be unix-style. + dest = dest.replace(pathSeparatorRe, '/'); + src = src.replace(pathSeparatorRe, '/'); + // Map correct src path to dest path. + if (fileByDest[dest]) { + // If dest already exists, push this src onto that dest's src array. + fileByDest[dest].src.push(src); + } else { + // Otherwise create a new src-dest file mapping object. + files.push({ + src: [src], + dest: dest, + }); + // And store a reference for later use. + fileByDest[dest] = files[files.length - 1]; + } + }); + return files; +}; + +// reusing bits of grunt's multi-task source normalization +file.normalizeFilesArray = function(data) { + var files = []; + + data.forEach(function(obj) { + var prop; + if ('src' in obj || 'dest' in obj) { + files.push(obj); + } + }); + + if (files.length === 0) { + return []; + } + + files = _(files).chain().forEach(function(obj) { + if (!('src' in obj) || !obj.src) { return; } + // Normalize .src properties to flattened array. + if (Array.isArray(obj.src)) { + obj.src = flatten(obj.src); + } else { + obj.src = [obj.src]; + } + }).map(function(obj) { + // Build options object, removing unwanted properties. 
+ var expandOptions = Object.assign({}, obj); + delete expandOptions.src; + delete expandOptions.dest; + + // Expand file mappings. + if (obj.expand) { + return file.expandMapping(obj.src, obj.dest, expandOptions).map(function(mapObj) { + // Copy obj properties to result. + var result = Object.assign({}, obj); + // Make a clone of the orig obj available. + result.orig = Object.assign({}, obj); + // Set .src and .dest, processing both as templates. + result.src = mapObj.src; + result.dest = mapObj.dest; + // Remove unwanted properties. + ['expand', 'cwd', 'flatten', 'rename', 'ext'].forEach(function(prop) { + delete result[prop]; + }); + return result; + }); + } + + // Copy obj properties to result, adding an .orig property. + var result = Object.assign({}, obj); + // Make a clone of the orig obj available. + result.orig = Object.assign({}, obj); + + if ('src' in result) { + // Expose an expand-on-demand getter method as .src. + Object.defineProperty(result, 'src', { + enumerable: true, + get: function fn() { + var src; + if (!('result' in fn)) { + src = obj.src; + // If src is an array, flatten it. Otherwise, make it into an array. + src = Array.isArray(src) ? flatten(src) : [src]; + // Expand src files, memoizing result. + fn.result = file.expand(expandOptions, src); + } + return fn.result; + } + }); + } + + if ('dest' in result) { + result.dest = obj.dest; + } + + return result; + }).flatten().value(); + + return files; +}; + + +/***/ }), + +/***/ 88560: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +/** + * archiver-utils + * + * Copyright (c) 2015 Chris Talkington. + * Licensed under the MIT license. 
+ * https://github.com/archiverjs/archiver-utils/blob/master/LICENSE + */ +var fs = __nccwpck_require__(60015); +var path = __nccwpck_require__(71017); +var nutil = __nccwpck_require__(73837); +var lazystream = __nccwpck_require__(76213); +var normalizePath = __nccwpck_require__(20920); +var defaults = __nccwpck_require__(2349); + +var Stream = (__nccwpck_require__(12781).Stream); +var PassThrough = (__nccwpck_require__(18483).PassThrough); + +var utils = module.exports = {}; +utils.file = __nccwpck_require__(55789); + +function assertPath(path) { + if (typeof path !== 'string') { + throw new TypeError('Path must be a string. Received ' + nutils.inspect(path)); + } +} + +utils.collectStream = function(source, callback) { + var collection = []; + var size = 0; + + source.on('error', callback); + + source.on('data', function(chunk) { + collection.push(chunk); + size += chunk.length; + }); + + source.on('end', function() { + var buf = new Buffer(size); + var offset = 0; + + collection.forEach(function(data) { + data.copy(buf, offset); + offset += data.length; + }); + + callback(null, buf); + }); +}; + +utils.dateify = function(dateish) { + dateish = dateish || new Date(); + + if (dateish instanceof Date) { + dateish = dateish; + } else if (typeof dateish === 'string') { + dateish = new Date(dateish); + } else { + dateish = new Date(); + } + + return dateish; +}; + +// this is slightly different from lodash version +utils.defaults = function(object, source, guard) { + var args = arguments; + args[0] = args[0] || {}; + + return defaults(...args); +}; + +utils.isStream = function(source) { + return source instanceof Stream; +}; + +utils.lazyReadStream = function(filepath) { + return new lazystream.Readable(function() { + return fs.createReadStream(filepath); + }); +}; + +utils.normalizeInputSource = function(source) { + if (source === null) { + return new Buffer(0); + } else if (typeof source === 'string') { + return new Buffer(source); + } else if 
(utils.isStream(source) && !source._readableState) { + var normalized = new PassThrough(); + source.pipe(normalized); + + return normalized; + } + + return source; +}; + +utils.sanitizePath = function(filepath) { + return normalizePath(filepath, false).replace(/^\w+:/, '').replace(/^(\.\.\/|\/)+/, ''); +}; + +utils.trailingSlashIt = function(str) { + return str.slice(-1) !== '/' ? str + '/' : str; +}; + +utils.unixifyPath = function(filepath) { + return normalizePath(filepath, false).replace(/^\w+:/, ''); +}; + +utils.walkdir = function(dirpath, base, callback) { + var results = []; + + if (typeof base === 'function') { + callback = base; + base = dirpath; + } + + fs.readdir(dirpath, function(err, list) { + var i = 0; + var file; + var filepath; + + if (err) { + return callback(err); + } + + (function next() { + file = list[i++]; + + if (!file) { + return callback(null, results); + } + + filepath = path.join(dirpath, file); + + fs.stat(filepath, function(err, stats) { + results.push({ + path: filepath, + relative: path.relative(base, filepath).replace(/\\/g, '/'), + stats: stats + }); + + if (stats && stats.isDirectory()) { + utils.walkdir(filepath, base, function(err, res) { + res.forEach(function(dirEntry) { + results.push(dirEntry); + }); + next(); + }); + } else { + next(); + } + }); + })(); + }); +}; + + +/***/ }), + +/***/ 73882: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +/** + * Archiver Vending + * + * @ignore + * @license [MIT]{@link https://github.com/archiverjs/node-archiver/blob/master/LICENSE} + * @copyright (c) 2012-2014 Chris Talkington, contributors. + */ +var Archiver = __nccwpck_require__(12479); + +var formats = {}; + +/** + * Dispenses a new Archiver instance. + * + * @constructor + * @param {String} format The archive format to use. 
+ * @param {Object} options See [Archiver]{@link Archiver} + * @return {Archiver} + */ +var vending = function(format, options) { + return vending.create(format, options); +}; + +/** + * Creates a new Archiver instance. + * + * @param {String} format The archive format to use. + * @param {Object} options See [Archiver]{@link Archiver} + * @return {Archiver} + */ +vending.create = function(format, options) { + if (formats[format]) { + var instance = new Archiver(format, options); + instance.setFormat(format); + instance.setModule(new formats[format](options)); + + return instance; + } else { + throw new Error('create(' + format + '): format not registered'); + } +}; + +/** + * Registers a format for use with archiver. + * + * @param {String} format The name of the format. + * @param {Function} module The function for archiver to interact with. + * @return void + */ +vending.registerFormat = function(format, module) { + if (formats[format]) { + throw new Error('register(' + format + '): format already registered'); + } + + if (typeof module !== 'function') { + throw new Error('register(' + format + '): format module invalid'); + } + + if (typeof module.prototype.append !== 'function' || typeof module.prototype.finalize !== 'function') { + throw new Error('register(' + format + '): format module missing methods'); + } + + formats[format] = module; +}; + +vending.registerFormat('zip', __nccwpck_require__(14453)); +vending.registerFormat('tar', __nccwpck_require__(50571)); +vending.registerFormat('json', __nccwpck_require__(68177)); + +module.exports = vending; + +/***/ }), + +/***/ 12479: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +/** + * Archiver Core + * + * @ignore + * @license [MIT]{@link https://github.com/archiverjs/node-archiver/blob/master/LICENSE} + * @copyright (c) 2012-2014 Chris Talkington, contributors. 
+ */ +var fs = __nccwpck_require__(57147); +var glob = __nccwpck_require__(36548); +var async = __nccwpck_require__(5181); +var path = __nccwpck_require__(71017); +var util = __nccwpck_require__(88560); + +var inherits = (__nccwpck_require__(73837).inherits); +var ArchiverError = __nccwpck_require__(73705); +var Transform = (__nccwpck_require__(67031).Transform); + +var win32 = process.platform === 'win32'; + +/** + * @constructor + * @param {String} format The archive format to use. + * @param {(CoreOptions|TransformOptions)} options See also {@link ZipOptions} and {@link TarOptions}. + */ +var Archiver = function(format, options) { + if (!(this instanceof Archiver)) { + return new Archiver(format, options); + } + + if (typeof format !== 'string') { + options = format; + format = 'zip'; + } + + options = this.options = util.defaults(options, { + highWaterMark: 1024 * 1024, + statConcurrency: 4 + }); + + Transform.call(this, options); + + this._format = false; + this._module = false; + this._pending = 0; + this._pointer = 0; + + this._entriesCount = 0; + this._entriesProcessedCount = 0; + this._fsEntriesTotalBytes = 0; + this._fsEntriesProcessedBytes = 0; + + this._queue = async.queue(this._onQueueTask.bind(this), 1); + this._queue.drain(this._onQueueDrain.bind(this)); + + this._statQueue = async.queue(this._onStatQueueTask.bind(this), options.statConcurrency); + this._statQueue.drain(this._onQueueDrain.bind(this)); + + this._state = { + aborted: false, + finalize: false, + finalizing: false, + finalized: false, + modulePiped: false + }; + + this._streams = []; +}; + +inherits(Archiver, Transform); + +/** + * Internal logic for `abort`. + * + * @private + * @return void + */ +Archiver.prototype._abort = function() { + this._state.aborted = true; + this._queue.kill(); + this._statQueue.kill(); + + if (this._queue.idle()) { + this._shutdown(); + } +}; + +/** + * Internal helper for appending files. + * + * @private + * @param {String} filepath The source filepath. 
+ * @param {EntryData} data The entry data. + * @return void + */ +Archiver.prototype._append = function(filepath, data) { + data = data || {}; + + var task = { + source: null, + filepath: filepath + }; + + if (!data.name) { + data.name = filepath; + } + + data.sourcePath = filepath; + task.data = data; + this._entriesCount++; + + if (data.stats && data.stats instanceof fs.Stats) { + task = this._updateQueueTaskWithStats(task, data.stats); + if (task) { + if (data.stats.size) { + this._fsEntriesTotalBytes += data.stats.size; + } + + this._queue.push(task); + } + } else { + this._statQueue.push(task); + } +}; + +/** + * Internal logic for `finalize`. + * + * @private + * @return void + */ +Archiver.prototype._finalize = function() { + if (this._state.finalizing || this._state.finalized || this._state.aborted) { + return; + } + + this._state.finalizing = true; + + this._moduleFinalize(); + + this._state.finalizing = false; + this._state.finalized = true; +}; + +/** + * Checks the various state variables to determine if we can `finalize`. + * + * @private + * @return {Boolean} + */ +Archiver.prototype._maybeFinalize = function() { + if (this._state.finalizing || this._state.finalized || this._state.aborted) { + return false; + } + + if (this._state.finalize && this._pending === 0 && this._queue.idle() && this._statQueue.idle()) { + this._finalize(); + return true; + } + + return false; +}; + +/** + * Appends an entry to the module. 
+ * + * @private + * @fires Archiver#entry + * @param {(Buffer|Stream)} source + * @param {EntryData} data + * @param {Function} callback + * @return void + */ +Archiver.prototype._moduleAppend = function(source, data, callback) { + if (this._state.aborted) { + callback(); + return; + } + + this._module.append(source, data, function(err) { + this._task = null; + + if (this._state.aborted) { + this._shutdown(); + return; + } + + if (err) { + this.emit('error', err); + setImmediate(callback); + return; + } + + /** + * Fires when the entry's input has been processed and appended to the archive. + * + * @event Archiver#entry + * @type {EntryData} + */ + this.emit('entry', data); + this._entriesProcessedCount++; + + if (data.stats && data.stats.size) { + this._fsEntriesProcessedBytes += data.stats.size; + } + + /** + * @event Archiver#progress + * @type {ProgressData} + */ + this.emit('progress', { + entries: { + total: this._entriesCount, + processed: this._entriesProcessedCount + }, + fs: { + totalBytes: this._fsEntriesTotalBytes, + processedBytes: this._fsEntriesProcessedBytes + } + }); + + setImmediate(callback); + }.bind(this)); +}; + +/** + * Finalizes the module. + * + * @private + * @return void + */ +Archiver.prototype._moduleFinalize = function() { + if (typeof this._module.finalize === 'function') { + this._module.finalize(); + } else if (typeof this._module.end === 'function') { + this._module.end(); + } else { + this.emit('error', new ArchiverError('NOENDMETHOD')); + } +}; + +/** + * Pipes the module to our internal stream with error bubbling. + * + * @private + * @return void + */ +Archiver.prototype._modulePipe = function() { + this._module.on('error', this._onModuleError.bind(this)); + this._module.pipe(this); + this._state.modulePiped = true; +}; + +/** + * Determines if the current module supports a defined feature. 
+ * + * @private + * @param {String} key + * @return {Boolean} + */ +Archiver.prototype._moduleSupports = function(key) { + if (!this._module.supports || !this._module.supports[key]) { + return false; + } + + return this._module.supports[key]; +}; + +/** + * Unpipes the module from our internal stream. + * + * @private + * @return void + */ +Archiver.prototype._moduleUnpipe = function() { + this._module.unpipe(this); + this._state.modulePiped = false; +}; + +/** + * Normalizes entry data with fallbacks for key properties. + * + * @private + * @param {Object} data + * @param {fs.Stats} stats + * @return {Object} + */ +Archiver.prototype._normalizeEntryData = function(data, stats) { + data = util.defaults(data, { + type: 'file', + name: null, + date: null, + mode: null, + prefix: null, + sourcePath: null, + stats: false + }); + + if (stats && data.stats === false) { + data.stats = stats; + } + + var isDir = data.type === 'directory'; + + if (data.name) { + if (typeof data.prefix === 'string' && '' !== data.prefix) { + data.name = data.prefix + '/' + data.name; + data.prefix = null; + } + + data.name = util.sanitizePath(data.name); + + if (data.type !== 'symlink' && data.name.slice(-1) === '/') { + isDir = true; + data.type = 'directory'; + } else if (isDir) { + data.name += '/'; + } + } + + // 511 === 0777; 493 === 0755; 438 === 0666; 420 === 0644 + if (typeof data.mode === 'number') { + if (win32) { + data.mode &= 511; + } else { + data.mode &= 4095 + } + } else if (data.stats && data.mode === null) { + if (win32) { + data.mode = data.stats.mode & 511; + } else { + data.mode = data.stats.mode & 4095; + } + + // stat isn't reliable on windows; force 0755 for dir + if (win32 && isDir) { + data.mode = 493; + } + } else if (data.mode === null) { + data.mode = isDir ? 
493 : 420; + } + + if (data.stats && data.date === null) { + data.date = data.stats.mtime; + } else { + data.date = util.dateify(data.date); + } + + return data; +}; + +/** + * Error listener that re-emits error on to our internal stream. + * + * @private + * @param {Error} err + * @return void + */ +Archiver.prototype._onModuleError = function(err) { + /** + * @event Archiver#error + * @type {ErrorData} + */ + this.emit('error', err); +}; + +/** + * Checks the various state variables after queue has drained to determine if + * we need to `finalize`. + * + * @private + * @return void + */ +Archiver.prototype._onQueueDrain = function() { + if (this._state.finalizing || this._state.finalized || this._state.aborted) { + return; + } + + if (this._state.finalize && this._pending === 0 && this._queue.idle() && this._statQueue.idle()) { + this._finalize(); + } +}; + +/** + * Appends each queue task to the module. + * + * @private + * @param {Object} task + * @param {Function} callback + * @return void + */ +Archiver.prototype._onQueueTask = function(task, callback) { + var fullCallback = () => { + if(task.data.callback) { + task.data.callback(); + } + callback(); + } + + if (this._state.finalizing || this._state.finalized || this._state.aborted) { + fullCallback(); + return; + } + + this._task = task; + this._moduleAppend(task.source, task.data, fullCallback); +}; + +/** + * Performs a file stat and reinjects the task back into the queue. 
+ * + * @private + * @param {Object} task + * @param {Function} callback + * @return void + */ +Archiver.prototype._onStatQueueTask = function(task, callback) { + if (this._state.finalizing || this._state.finalized || this._state.aborted) { + callback(); + return; + } + + fs.lstat(task.filepath, function(err, stats) { + if (this._state.aborted) { + setImmediate(callback); + return; + } + + if (err) { + this._entriesCount--; + + /** + * @event Archiver#warning + * @type {ErrorData} + */ + this.emit('warning', err); + setImmediate(callback); + return; + } + + task = this._updateQueueTaskWithStats(task, stats); + + if (task) { + if (stats.size) { + this._fsEntriesTotalBytes += stats.size; + } + + this._queue.push(task); + } + + setImmediate(callback); + }.bind(this)); +}; + +/** + * Unpipes the module and ends our internal stream. + * + * @private + * @return void + */ +Archiver.prototype._shutdown = function() { + this._moduleUnpipe(); + this.end(); +}; + +/** + * Tracks the bytes emitted by our internal stream. + * + * @private + * @param {Buffer} chunk + * @param {String} encoding + * @param {Function} callback + * @return void + */ +Archiver.prototype._transform = function(chunk, encoding, callback) { + if (chunk) { + this._pointer += chunk.length; + } + + callback(null, chunk); +}; + +/** + * Updates and normalizes a queue task using stats data. 
+ * + * @private + * @param {Object} task + * @param {fs.Stats} stats + * @return {Object} + */ +Archiver.prototype._updateQueueTaskWithStats = function(task, stats) { + if (stats.isFile()) { + task.data.type = 'file'; + task.data.sourceType = 'stream'; + task.source = util.lazyReadStream(task.filepath); + } else if (stats.isDirectory() && this._moduleSupports('directory')) { + task.data.name = util.trailingSlashIt(task.data.name); + task.data.type = 'directory'; + task.data.sourcePath = util.trailingSlashIt(task.filepath); + task.data.sourceType = 'buffer'; + task.source = Buffer.concat([]); + } else if (stats.isSymbolicLink() && this._moduleSupports('symlink')) { + var linkPath = fs.readlinkSync(task.filepath); + var dirName = path.dirname(task.filepath); + task.data.type = 'symlink'; + task.data.linkname = path.relative(dirName, path.resolve(dirName, linkPath)); + task.data.sourceType = 'buffer'; + task.source = Buffer.concat([]); + } else { + if (stats.isDirectory()) { + this.emit('warning', new ArchiverError('DIRECTORYNOTSUPPORTED', task.data)); + } else if (stats.isSymbolicLink()) { + this.emit('warning', new ArchiverError('SYMLINKNOTSUPPORTED', task.data)); + } else { + this.emit('warning', new ArchiverError('ENTRYNOTSUPPORTED', task.data)); + } + + return null; + } + + task.data = this._normalizeEntryData(task.data, stats); + + return task; +}; + +/** + * Aborts the archiving process, taking a best-effort approach, by: + * + * - removing any pending queue tasks + * - allowing any active queue workers to finish + * - detaching internal module pipes + * - ending both sides of the Transform stream + * + * It will NOT drain any remaining sources. + * + * @return {this} + */ +Archiver.prototype.abort = function() { + if (this._state.aborted || this._state.finalized) { + return this; + } + + this._abort(); + + return this; +}; + +/** + * Appends an input source (text string, buffer, or stream) to the instance. 
+ * + * When the instance has received, processed, and emitted the input, the `entry` + * event is fired. + * + * @fires Archiver#entry + * @param {(Buffer|Stream|String)} source The input source. + * @param {EntryData} data See also {@link ZipEntryData} and {@link TarEntryData}. + * @return {this} + */ +Archiver.prototype.append = function(source, data) { + if (this._state.finalize || this._state.aborted) { + this.emit('error', new ArchiverError('QUEUECLOSED')); + return this; + } + + data = this._normalizeEntryData(data); + + if (typeof data.name !== 'string' || data.name.length === 0) { + this.emit('error', new ArchiverError('ENTRYNAMEREQUIRED')); + return this; + } + + if (data.type === 'directory' && !this._moduleSupports('directory')) { + this.emit('error', new ArchiverError('DIRECTORYNOTSUPPORTED', { name: data.name })); + return this; + } + + source = util.normalizeInputSource(source); + + if (Buffer.isBuffer(source)) { + data.sourceType = 'buffer'; + } else if (util.isStream(source)) { + data.sourceType = 'stream'; + } else { + this.emit('error', new ArchiverError('INPUTSTEAMBUFFERREQUIRED', { name: data.name })); + return this; + } + + this._entriesCount++; + this._queue.push({ + data: data, + source: source + }); + + return this; +}; + +/** + * Appends a directory and its files, recursively, given its dirpath. + * + * @param {String} dirpath The source directory path. + * @param {String} destpath The destination path within the archive. + * @param {(EntryData|Function)} data See also [ZipEntryData]{@link ZipEntryData} and + * [TarEntryData]{@link TarEntryData}. 
+ * @return {this} + */ +Archiver.prototype.directory = function(dirpath, destpath, data) { + if (this._state.finalize || this._state.aborted) { + this.emit('error', new ArchiverError('QUEUECLOSED')); + return this; + } + + if (typeof dirpath !== 'string' || dirpath.length === 0) { + this.emit('error', new ArchiverError('DIRECTORYDIRPATHREQUIRED')); + return this; + } + + this._pending++; + + if (destpath === false) { + destpath = ''; + } else if (typeof destpath !== 'string'){ + destpath = dirpath; + } + + var dataFunction = false; + if (typeof data === 'function') { + dataFunction = data; + data = {}; + } else if (typeof data !== 'object') { + data = {}; + } + + var globOptions = { + stat: true, + dot: true + }; + + function onGlobEnd() { + this._pending--; + this._maybeFinalize(); + } + + function onGlobError(err) { + this.emit('error', err); + } + + function onGlobMatch(match){ + globber.pause(); + + var ignoreMatch = false; + var entryData = Object.assign({}, data); + entryData.name = match.relative; + entryData.prefix = destpath; + entryData.stats = match.stat; + entryData.callback = globber.resume.bind(globber); + + try { + if (dataFunction) { + entryData = dataFunction(entryData); + + if (entryData === false) { + ignoreMatch = true; + } else if (typeof entryData !== 'object') { + throw new ArchiverError('DIRECTORYFUNCTIONINVALIDDATA', { dirpath: dirpath }); + } + } + } catch(e) { + this.emit('error', e); + return; + } + + if (ignoreMatch) { + globber.resume(); + return; + } + + this._append(match.absolute, entryData); + } + + var globber = glob(dirpath, globOptions); + globber.on('error', onGlobError.bind(this)); + globber.on('match', onGlobMatch.bind(this)); + globber.on('end', onGlobEnd.bind(this)); + + return this; +}; + +/** + * Appends a file given its filepath using a + * [lazystream]{@link https://github.com/jpommerening/node-lazystream} wrapper to + * prevent issues with open file limits. 
+ * + * When the instance has received, processed, and emitted the file, the `entry` + * event is fired. + * + * @param {String} filepath The source filepath. + * @param {EntryData} data See also [ZipEntryData]{@link ZipEntryData} and + * [TarEntryData]{@link TarEntryData}. + * @return {this} + */ +Archiver.prototype.file = function(filepath, data) { + if (this._state.finalize || this._state.aborted) { + this.emit('error', new ArchiverError('QUEUECLOSED')); + return this; + } + + if (typeof filepath !== 'string' || filepath.length === 0) { + this.emit('error', new ArchiverError('FILEFILEPATHREQUIRED')); + return this; + } + + this._append(filepath, data); + + return this; +}; + +/** + * Appends multiple files that match a glob pattern. + * + * @param {String} pattern The [glob pattern]{@link https://github.com/isaacs/minimatch} to match. + * @param {Object} options See [node-glob]{@link https://github.com/yqnn/node-readdir-glob#options}. + * @param {EntryData} data See also [ZipEntryData]{@link ZipEntryData} and + * [TarEntryData]{@link TarEntryData}. + * @return {this} + */ +Archiver.prototype.glob = function(pattern, options, data) { + this._pending++; + + options = util.defaults(options, { + stat: true, + pattern: pattern + }); + + function onGlobEnd() { + this._pending--; + this._maybeFinalize(); + } + + function onGlobError(err) { + this.emit('error', err); + } + + function onGlobMatch(match){ + globber.pause(); + var entryData = Object.assign({}, data); + entryData.callback = globber.resume.bind(globber); + entryData.stats = match.stat; + entryData.name = match.relative; + + this._append(match.absolute, entryData); + } + + var globber = glob(options.cwd || '.', options); + globber.on('error', onGlobError.bind(this)); + globber.on('match', onGlobMatch.bind(this)); + globber.on('end', onGlobEnd.bind(this)); + + return this; +}; + +/** + * Finalizes the instance and prevents further appending to the archive + * structure (queue will continue til drained). 
+ * + * The `end`, `close` or `finish` events on the destination stream may fire + * right after calling this method so you should set listeners beforehand to + * properly detect stream completion. + * + * @return {this} + */ +Archiver.prototype.finalize = function() { + if (this._state.aborted) { + this.emit('error', new ArchiverError('ABORTED')); + return this; + } + + if (this._state.finalize) { + this.emit('error', new ArchiverError('FINALIZING')); + return this; + } + + this._state.finalize = true; + + if (this._pending === 0 && this._queue.idle() && this._statQueue.idle()) { + this._finalize(); + } + + var self = this; + + return new Promise(function(resolve, reject) { + var errored; + + self._module.on('end', function() { + if (!errored) { + resolve(); + } + }) + + self._module.on('error', function(err) { + errored = true; + reject(err); + }) + }) +}; + +/** + * Sets the module format name used for archiving. + * + * @param {String} format The name of the format. + * @return {this} + */ +Archiver.prototype.setFormat = function(format) { + if (this._format) { + this.emit('error', new ArchiverError('FORMATSET')); + return this; + } + + this._format = format; + + return this; +}; + +/** + * Sets the module used for archiving. + * + * @param {Function} module The function for archiver to interact with. + * @return {this} + */ +Archiver.prototype.setModule = function(module) { + if (this._state.aborted) { + this.emit('error', new ArchiverError('ABORTED')); + return this; + } + + if (this._state.module) { + this.emit('error', new ArchiverError('MODULESET')); + return this; + } + + this._module = module; + this._modulePipe(); + + return this; +}; + +/** + * Appends a symlink to the instance. + * + * This does NOT interact with filesystem and is used for programmatically creating symlinks. + * + * @param {String} filepath The symlink path (within archive). + * @param {String} target The target path (within archive). 
+ * @return {this} + */ +Archiver.prototype.symlink = function(filepath, target) { + if (this._state.finalize || this._state.aborted) { + this.emit('error', new ArchiverError('QUEUECLOSED')); + return this; + } + + if (typeof filepath !== 'string' || filepath.length === 0) { + this.emit('error', new ArchiverError('SYMLINKFILEPATHREQUIRED')); + return this; + } + + if (typeof target !== 'string' || target.length === 0) { + this.emit('error', new ArchiverError('SYMLINKTARGETREQUIRED', { filepath: filepath })); + return this; + } + + if (!this._moduleSupports('symlink')) { + this.emit('error', new ArchiverError('SYMLINKNOTSUPPORTED', { filepath: filepath })); + return this; + } + + var data = {}; + data.type = 'symlink'; + data.name = filepath.replace(/\\/g, '/'); + data.linkname = target.replace(/\\/g, '/'); + data.sourceType = 'buffer'; + + this._entriesCount++; + this._queue.push({ + data: data, + source: Buffer.concat([]) + }); + + return this; +}; + +/** + * Returns the current length (in bytes) that has been emitted. + * + * @return {Number} + */ +Archiver.prototype.pointer = function() { + return this._pointer; +}; + +/** + * Middleware-like helper that has yet to be fully implemented. + * + * @private + * @param {Function} plugin + * @return {this} + */ +Archiver.prototype.use = function(plugin) { + this._streams.push(plugin); + return this; +}; + +module.exports = Archiver; + +/** + * @typedef {Object} CoreOptions + * @global + * @property {Number} [statConcurrency=4] Sets the number of workers used to + * process the internal fs stat queue. + */ + +/** + * @typedef {Object} TransformOptions + * @property {Boolean} [allowHalfOpen=true] If set to false, then the stream + * will automatically end the readable side when the writable side ends and vice + * versa. + * @property {Boolean} [readableObjectMode=false] Sets objectMode for readable + * side of the stream. Has no effect if objectMode is true. 
+ * @property {Boolean} [writableObjectMode=false] Sets objectMode for writable + * side of the stream. Has no effect if objectMode is true. + * @property {Boolean} [decodeStrings=true] Whether or not to decode strings + * into Buffers before passing them to _write(). `Writable` + * @property {String} [encoding=NULL] If specified, then buffers will be decoded + * to strings using the specified encoding. `Readable` + * @property {Number} [highWaterMark=16kb] The maximum number of bytes to store + * in the internal buffer before ceasing to read from the underlying resource. + * `Readable` `Writable` + * @property {Boolean} [objectMode=false] Whether this stream should behave as a + * stream of objects. Meaning that stream.read(n) returns a single value instead + * of a Buffer of size n. `Readable` `Writable` + */ + +/** + * @typedef {Object} EntryData + * @property {String} name Sets the entry name including internal path. + * @property {(String|Date)} [date=NOW()] Sets the entry date. + * @property {Number} [mode=D:0755/F:0644] Sets the entry permissions. + * @property {String} [prefix] Sets a path prefix for the entry name. Useful + * when working with methods like `directory` or `glob`. + * @property {fs.Stats} [stats] Sets the fs stat data for this entry allowing + * for reduction of fs stat calls when stat data is already known. + */ + +/** + * @typedef {Object} ErrorData + * @property {String} message The message of the error. + * @property {String} code The error code assigned to this error. + * @property {String} data Additional data provided for reporting or debugging (where available). + */ + +/** + * @typedef {Object} ProgressData + * @property {Object} entries + * @property {Number} entries.total Number of entries that have been appended. + * @property {Number} entries.processed Number of entries that have been processed. + * @property {Object} fs + * @property {Number} fs.totalBytes Number of bytes that have been appended. 
Calculated asynchronously and might not be accurate: it growth while entries are added. (based on fs.Stats) + * @property {Number} fs.processedBytes Number of bytes that have been processed. (based on fs.Stats) + */ + + +/***/ }), + +/***/ 73705: +/***/ ((module, exports, __nccwpck_require__) => { + +/** + * Archiver Core + * + * @ignore + * @license [MIT]{@link https://github.com/archiverjs/node-archiver/blob/master/LICENSE} + * @copyright (c) 2012-2014 Chris Talkington, contributors. + */ + +var util = __nccwpck_require__(73837); + +const ERROR_CODES = { + 'ABORTED': 'archive was aborted', + 'DIRECTORYDIRPATHREQUIRED': 'diretory dirpath argument must be a non-empty string value', + 'DIRECTORYFUNCTIONINVALIDDATA': 'invalid data returned by directory custom data function', + 'ENTRYNAMEREQUIRED': 'entry name must be a non-empty string value', + 'FILEFILEPATHREQUIRED': 'file filepath argument must be a non-empty string value', + 'FINALIZING': 'archive already finalizing', + 'QUEUECLOSED': 'queue closed', + 'NOENDMETHOD': 'no suitable finalize/end method defined by module', + 'DIRECTORYNOTSUPPORTED': 'support for directory entries not defined by module', + 'FORMATSET': 'archive format already set', + 'INPUTSTEAMBUFFERREQUIRED': 'input source must be valid Stream or Buffer instance', + 'MODULESET': 'module already set', + 'SYMLINKNOTSUPPORTED': 'support for symlink entries not defined by module', + 'SYMLINKFILEPATHREQUIRED': 'symlink filepath argument must be a non-empty string value', + 'SYMLINKTARGETREQUIRED': 'symlink target argument must be a non-empty string value', + 'ENTRYNOTSUPPORTED': 'entry not supported' +}; + +function ArchiverError(code, data) { + Error.captureStackTrace(this, this.constructor); + //this.name = this.constructor.name; + this.message = ERROR_CODES[code] || code; + this.code = code; + this.data = data; +} + +util.inherits(ArchiverError, Error); + +exports = module.exports = ArchiverError; + +/***/ }), + +/***/ 68177: +/***/ ((module, 
__unused_webpack_exports, __nccwpck_require__) => { + +/** + * JSON Format Plugin + * + * @module plugins/json + * @license [MIT]{@link https://github.com/archiverjs/node-archiver/blob/master/LICENSE} + * @copyright (c) 2012-2014 Chris Talkington, contributors. + */ +var inherits = (__nccwpck_require__(73837).inherits); +var Transform = (__nccwpck_require__(67031).Transform); + +var crc32 = __nccwpck_require__(14087); +var util = __nccwpck_require__(88560); + +/** + * @constructor + * @param {(JsonOptions|TransformOptions)} options + */ +var Json = function(options) { + if (!(this instanceof Json)) { + return new Json(options); + } + + options = this.options = util.defaults(options, {}); + + Transform.call(this, options); + + this.supports = { + directory: true, + symlink: true + }; + + this.files = []; +}; + +inherits(Json, Transform); + +/** + * [_transform description] + * + * @private + * @param {Buffer} chunk + * @param {String} encoding + * @param {Function} callback + * @return void + */ +Json.prototype._transform = function(chunk, encoding, callback) { + callback(null, chunk); +}; + +/** + * [_writeStringified description] + * + * @private + * @return void + */ +Json.prototype._writeStringified = function() { + var fileString = JSON.stringify(this.files); + this.write(fileString); +}; + +/** + * [append description] + * + * @param {(Buffer|Stream)} source + * @param {EntryData} data + * @param {Function} callback + * @return void + */ +Json.prototype.append = function(source, data, callback) { + var self = this; + + data.crc32 = 0; + + function onend(err, sourceBuffer) { + if (err) { + callback(err); + return; + } + + data.size = sourceBuffer.length || 0; + data.crc32 = crc32.unsigned(sourceBuffer); + + self.files.push(data); + + callback(null, data); + } + + if (data.sourceType === 'buffer') { + onend(null, source); + } else if (data.sourceType === 'stream') { + util.collectStream(source, onend); + } +}; + +/** + * [finalize description] + * + * @return 
void + */ +Json.prototype.finalize = function() { + this._writeStringified(); + this.end(); +}; + +module.exports = Json; + +/** + * @typedef {Object} JsonOptions + * @global + */ + + +/***/ }), + +/***/ 50571: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +/** + * TAR Format Plugin + * + * @module plugins/tar + * @license [MIT]{@link https://github.com/archiverjs/node-archiver/blob/master/LICENSE} + * @copyright (c) 2012-2014 Chris Talkington, contributors. + */ +var zlib = __nccwpck_require__(59796); + +var engine = __nccwpck_require__(35893); +var util = __nccwpck_require__(88560); + +/** + * @constructor + * @param {TarOptions} options + */ +var Tar = function(options) { + if (!(this instanceof Tar)) { + return new Tar(options); + } + + options = this.options = util.defaults(options, { + gzip: false + }); + + if (typeof options.gzipOptions !== 'object') { + options.gzipOptions = {}; + } + + this.supports = { + directory: true, + symlink: true + }; + + this.engine = engine.pack(options); + this.compressor = false; + + if (options.gzip) { + this.compressor = zlib.createGzip(options.gzipOptions); + this.compressor.on('error', this._onCompressorError.bind(this)); + } +}; + +/** + * [_onCompressorError description] + * + * @private + * @param {Error} err + * @return void + */ +Tar.prototype._onCompressorError = function(err) { + this.engine.emit('error', err); +}; + +/** + * [append description] + * + * @param {(Buffer|Stream)} source + * @param {TarEntryData} data + * @param {Function} callback + * @return void + */ +Tar.prototype.append = function(source, data, callback) { + var self = this; + + data.mtime = data.date; + + function append(err, sourceBuffer) { + if (err) { + callback(err); + return; + } + + self.engine.entry(data, sourceBuffer, function(err) { + callback(err, data); + }); + } + + if (data.sourceType === 'buffer') { + append(null, source); + } else if (data.sourceType === 'stream' && data.stats) { + data.size = 
data.stats.size; + + var entry = self.engine.entry(data, function(err) { + callback(err, data); + }); + + source.pipe(entry); + } else if (data.sourceType === 'stream') { + util.collectStream(source, append); + } +}; + +/** + * [finalize description] + * + * @return void + */ +Tar.prototype.finalize = function() { + this.engine.finalize(); +}; + +/** + * [on description] + * + * @return this.engine + */ +Tar.prototype.on = function() { + return this.engine.on.apply(this.engine, arguments); +}; + +/** + * [pipe description] + * + * @param {String} destination + * @param {Object} options + * @return this.engine + */ +Tar.prototype.pipe = function(destination, options) { + if (this.compressor) { + return this.engine.pipe.apply(this.engine, [this.compressor]).pipe(destination, options); + } else { + return this.engine.pipe.apply(this.engine, arguments); + } +}; + +/** + * [unpipe description] + * + * @return this.engine + */ +Tar.prototype.unpipe = function() { + if (this.compressor) { + return this.compressor.unpipe.apply(this.compressor, arguments); + } else { + return this.engine.unpipe.apply(this.engine, arguments); + } +}; + +module.exports = Tar; + +/** + * @typedef {Object} TarOptions + * @global + * @property {Boolean} [gzip=false] Compress the tar archive using gzip. + * @property {Object} [gzipOptions] Passed to [zlib]{@link https://nodejs.org/api/zlib.html#zlib_class_options} + * to control compression. + * @property {*} [*] See [tar-stream]{@link https://github.com/mafintosh/tar-stream} documentation for additional properties. + */ + +/** + * @typedef {Object} TarEntryData + * @global + * @property {String} name Sets the entry name including internal path. + * @property {(String|Date)} [date=NOW()] Sets the entry date. + * @property {Number} [mode=D:0755/F:0644] Sets the entry permissions. + * @property {String} [prefix] Sets a path prefix for the entry name. Useful + * when working with methods like `directory` or `glob`. 
+ * @property {fs.Stats} [stats] Sets the fs stat data for this entry allowing + * for reduction of fs stat calls when stat data is already known. + */ + +/** + * TarStream Module + * @external TarStream + * @see {@link https://github.com/mafintosh/tar-stream} + */ + + +/***/ }), + +/***/ 14453: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +/** + * ZIP Format Plugin + * + * @module plugins/zip + * @license [MIT]{@link https://github.com/archiverjs/node-archiver/blob/master/LICENSE} + * @copyright (c) 2012-2014 Chris Talkington, contributors. + */ +var engine = __nccwpck_require__(17754); +var util = __nccwpck_require__(88560); + +/** + * @constructor + * @param {ZipOptions} [options] + * @param {String} [options.comment] Sets the zip archive comment. + * @param {Boolean} [options.forceLocalTime=false] Forces the archive to contain local file times instead of UTC. + * @param {Boolean} [options.forceZip64=false] Forces the archive to contain ZIP64 headers. + * @param {Boolean} [options.store=false] Sets the compression method to STORE. + * @param {Object} [options.zlib] Passed to [zlib]{@link https://nodejs.org/api/zlib.html#zlib_class_options} + */ +var Zip = function(options) { + if (!(this instanceof Zip)) { + return new Zip(options); + } + + options = this.options = util.defaults(options, { + comment: '', + forceUTC: false, + store: false + }); + + this.supports = { + directory: true, + symlink: true + }; + + this.engine = new engine(options); +}; + +/** + * @param {(Buffer|Stream)} source + * @param {ZipEntryData} data + * @param {String} data.name Sets the entry name including internal path. + * @param {(String|Date)} [data.date=NOW()] Sets the entry date. + * @param {Number} [data.mode=D:0755/F:0644] Sets the entry permissions. + * @param {String} [data.prefix] Sets a path prefix for the entry name. Useful + * when working with methods like `directory` or `glob`. 
+ * @param {fs.Stats} [data.stats] Sets the fs stat data for this entry allowing + * for reduction of fs stat calls when stat data is already known. + * @param {Boolean} [data.store=ZipOptions.store] Sets the compression method to STORE. + * @param {Function} callback + * @return void + */ +Zip.prototype.append = function(source, data, callback) { + this.engine.entry(source, data, callback); +}; + +/** + * @return void + */ +Zip.prototype.finalize = function() { + this.engine.finalize(); +}; + +/** + * @return this.engine + */ +Zip.prototype.on = function() { + return this.engine.on.apply(this.engine, arguments); +}; + +/** + * @return this.engine + */ +Zip.prototype.pipe = function() { + return this.engine.pipe.apply(this.engine, arguments); +}; + +/** + * @return this.engine + */ +Zip.prototype.unpipe = function() { + return this.engine.unpipe.apply(this.engine, arguments); +}; + +module.exports = Zip; + +/** + * @typedef {Object} ZipOptions + * @global + * @property {String} [comment] Sets the zip archive comment. + * @property {Boolean} [forceLocalTime=false] Forces the archive to contain local file times instead of UTC. + * @property {Boolean} [forceZip64=false] Forces the archive to contain ZIP64 headers. + * @property {Boolean} [store=false] Sets the compression method to STORE. + * @property {Object} [zlib] Passed to [zlib]{@link https://nodejs.org/api/zlib.html#zlib_class_options} + * to control compression. + * @property {*} [*] See [zip-stream]{@link https://archiverjs.com/zip-stream/ZipStream.html} documentation for current list of properties. + */ + +/** + * @typedef {Object} ZipEntryData + * @global + * @property {String} name Sets the entry name including internal path. + * @property {(String|Date)} [date=NOW()] Sets the entry date. + * @property {Number} [mode=D:0755/F:0644] Sets the entry permissions. + * @property {String} [prefix] Sets a path prefix for the entry name. Useful + * when working with methods like `directory` or `glob`. 
+ * @property {fs.Stats} [stats] Sets the fs stat data for this entry allowing + * for reduction of fs stat calls when stat data is already known. + * @property {Boolean} [store=ZipOptions.store] Sets the compression method to STORE. + */ + +/** + * ZipStream Module + * @external ZipStream + * @see {@link https://www.archiverjs.com/zip-stream/ZipStream.html} + */ + + +/***/ }), + +/***/ 7190: +/***/ ((module) => { + +"use strict"; + + +const codes = {}; + +function createErrorType(code, message, Base) { + if (!Base) { + Base = Error + } + + function getMessage (arg1, arg2, arg3) { + if (typeof message === 'string') { + return message + } else { + return message(arg1, arg2, arg3) + } + } + + class NodeError extends Base { + constructor (arg1, arg2, arg3) { + super(getMessage(arg1, arg2, arg3)); + } + } + + NodeError.prototype.name = Base.name; + NodeError.prototype.code = code; + + codes[code] = NodeError; +} + +// https://github.com/nodejs/node/blob/v10.8.0/lib/internal/errors.js +function oneOf(expected, thing) { + if (Array.isArray(expected)) { + const len = expected.length; + expected = expected.map((i) => String(i)); + if (len > 2) { + return `one of ${thing} ${expected.slice(0, len - 1).join(', ')}, or ` + + expected[len - 1]; + } else if (len === 2) { + return `one of ${thing} ${expected[0]} or ${expected[1]}`; + } else { + return `of ${thing} ${expected[0]}`; + } + } else { + return `of ${thing} ${String(expected)}`; + } +} + +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/startsWith +function startsWith(str, search, pos) { + return str.substr(!pos || pos < 0 ? 
0 : +pos, search.length) === search; +} + +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/endsWith +function endsWith(str, search, this_len) { + if (this_len === undefined || this_len > str.length) { + this_len = str.length; + } + return str.substring(this_len - search.length, this_len) === search; +} + +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/includes +function includes(str, search, start) { + if (typeof start !== 'number') { + start = 0; + } + + if (start + search.length > str.length) { + return false; + } else { + return str.indexOf(search, start) !== -1; + } +} + +createErrorType('ERR_INVALID_OPT_VALUE', function (name, value) { + return 'The value "' + value + '" is invalid for option "' + name + '"' +}, TypeError); +createErrorType('ERR_INVALID_ARG_TYPE', function (name, expected, actual) { + // determiner: 'must be' or 'must not be' + let determiner; + if (typeof expected === 'string' && startsWith(expected, 'not ')) { + determiner = 'must not be'; + expected = expected.replace(/^not /, ''); + } else { + determiner = 'must be'; + } + + let msg; + if (endsWith(name, ' argument')) { + // For cases like 'first argument' + msg = `The ${name} ${determiner} ${oneOf(expected, 'type')}`; + } else { + const type = includes(name, '.') ? 'property' : 'argument'; + msg = `The "${name}" ${type} ${determiner} ${oneOf(expected, 'type')}`; + } + + msg += `. 
Received type ${typeof actual}`; + return msg; +}, TypeError); +createErrorType('ERR_STREAM_PUSH_AFTER_EOF', 'stream.push() after EOF'); +createErrorType('ERR_METHOD_NOT_IMPLEMENTED', function (name) { + return 'The ' + name + ' method is not implemented' +}); +createErrorType('ERR_STREAM_PREMATURE_CLOSE', 'Premature close'); +createErrorType('ERR_STREAM_DESTROYED', function (name) { + return 'Cannot call ' + name + ' after a stream was destroyed'; +}); +createErrorType('ERR_MULTIPLE_CALLBACK', 'Callback called multiple times'); +createErrorType('ERR_STREAM_CANNOT_PIPE', 'Cannot pipe, not readable'); +createErrorType('ERR_STREAM_WRITE_AFTER_END', 'write after end'); +createErrorType('ERR_STREAM_NULL_VALUES', 'May not write null values to stream', TypeError); +createErrorType('ERR_UNKNOWN_ENCODING', function (arg) { + return 'Unknown encoding: ' + arg +}, TypeError); +createErrorType('ERR_STREAM_UNSHIFT_AFTER_END_EVENT', 'stream.unshift() after end event'); + +module.exports.q = codes; + + +/***/ }), + +/***/ 91107: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. +// a duplex stream is just a stream that is both readable and writable. +// Since JS doesn't have multiple prototypal inheritance, this class +// prototypally inherits from Readable, and then parasitically from +// Writable. + +/**/ + +var objectKeys = Object.keys || function (obj) { + var keys = []; + + for (var key in obj) { + keys.push(key); + } + + return keys; +}; +/**/ + + +module.exports = Duplex; + +var Readable = __nccwpck_require__(41132); + +var Writable = __nccwpck_require__(56573); + +__nccwpck_require__(79557)(Duplex, Readable); + +{ + // Allow the keys array to be GC'ed. + var keys = objectKeys(Writable.prototype); + + for (var v = 0; v < keys.length; v++) { + var method = keys[v]; + if (!Duplex.prototype[method]) Duplex.prototype[method] = Writable.prototype[method]; + } +} + +function Duplex(options) { + if (!(this instanceof Duplex)) return new Duplex(options); + Readable.call(this, options); + Writable.call(this, options); + this.allowHalfOpen = true; + + if (options) { + if (options.readable === false) this.readable = false; + if (options.writable === false) this.writable = false; + + if (options.allowHalfOpen === false) { + this.allowHalfOpen = false; + this.once('end', onend); + } + } +} + +Object.defineProperty(Duplex.prototype, 'writableHighWaterMark', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._writableState.highWaterMark; + } +}); +Object.defineProperty(Duplex.prototype, 'writableBuffer', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will 
fail + enumerable: false, + get: function get() { + return this._writableState && this._writableState.getBuffer(); + } +}); +Object.defineProperty(Duplex.prototype, 'writableLength', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._writableState.length; + } +}); // the no-half-open enforcer + +function onend() { + // If the writable side ended, then we're ok. + if (this._writableState.ended) return; // no more data can be written. + // But allow more writes to happen in this tick. + + process.nextTick(onEndNT, this); +} + +function onEndNT(self) { + self.end(); +} + +Object.defineProperty(Duplex.prototype, 'destroyed', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + if (this._readableState === undefined || this._writableState === undefined) { + return false; + } + + return this._readableState.destroyed && this._writableState.destroyed; + }, + set: function set(value) { + // we ignore the value if the stream + // has not been initialized yet + if (this._readableState === undefined || this._writableState === undefined) { + return; + } // backward compatibility, the user is explicitly + // managing destroyed + + + this._readableState.destroyed = value; + this._writableState.destroyed = value; + } +}); + +/***/ }), + +/***/ 30933: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; +// Copyright Joyent, Inc. and other Node contributors. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. +// a passthrough stream. +// basically just the most minimal sort of Transform stream. +// Every written chunk gets output as-is. + + +module.exports = PassThrough; + +var Transform = __nccwpck_require__(38469); + +__nccwpck_require__(79557)(PassThrough, Transform); + +function PassThrough(options) { + if (!(this instanceof PassThrough)) return new PassThrough(options); + Transform.call(this, options); +} + +PassThrough.prototype._transform = function (chunk, encoding, cb) { + cb(null, chunk); +}; + +/***/ }), + +/***/ 41132: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; +// Copyright Joyent, Inc. and other Node contributors. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +module.exports = Readable; +/**/ + +var Duplex; +/**/ + +Readable.ReadableState = ReadableState; +/**/ + +var EE = (__nccwpck_require__(82361).EventEmitter); + +var EElistenerCount = function EElistenerCount(emitter, type) { + return emitter.listeners(type).length; +}; +/**/ + +/**/ + + +var Stream = __nccwpck_require__(49995); +/**/ + + +var Buffer = (__nccwpck_require__(14300).Buffer); + +var OurUint8Array = global.Uint8Array || function () {}; + +function _uint8ArrayToBuffer(chunk) { + return Buffer.from(chunk); +} + +function _isUint8Array(obj) { + return Buffer.isBuffer(obj) || obj instanceof OurUint8Array; +} +/**/ + + +var debugUtil = __nccwpck_require__(73837); + +var debug; + +if (debugUtil && debugUtil.debuglog) { + debug = debugUtil.debuglog('stream'); +} else { + debug = function debug() {}; +} +/**/ + + +var BufferList = __nccwpck_require__(30168); + +var destroyImpl = __nccwpck_require__(33844); + +var _require = __nccwpck_require__(95408), + getHighWaterMark = _require.getHighWaterMark; + +var _require$codes = (__nccwpck_require__(7190)/* .codes */ .q), + ERR_INVALID_ARG_TYPE = _require$codes.ERR_INVALID_ARG_TYPE, + ERR_STREAM_PUSH_AFTER_EOF = _require$codes.ERR_STREAM_PUSH_AFTER_EOF, + ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED, + ERR_STREAM_UNSHIFT_AFTER_END_EVENT = _require$codes.ERR_STREAM_UNSHIFT_AFTER_END_EVENT; // Lazy loaded to improve the startup performance. + + +var StringDecoder; +var createReadableStreamAsyncIterator; +var from; + +__nccwpck_require__(79557)(Readable, Stream); + +var errorOrDestroy = destroyImpl.errorOrDestroy; +var kProxyEvents = ['error', 'close', 'destroy', 'pause', 'resume']; + +function prependListener(emitter, event, fn) { + // Sadly this is not cacheable as some libraries bundle their own + // event emitter implementation with them. 
+ if (typeof emitter.prependListener === 'function') return emitter.prependListener(event, fn); // This is a hack to make sure that our error handler is attached before any + // userland ones. NEVER DO THIS. This is here only because this code needs + // to continue to work with older versions of Node.js that do not include + // the prependListener() method. The goal is to eventually remove this hack. + + if (!emitter._events || !emitter._events[event]) emitter.on(event, fn);else if (Array.isArray(emitter._events[event])) emitter._events[event].unshift(fn);else emitter._events[event] = [fn, emitter._events[event]]; +} + +function ReadableState(options, stream, isDuplex) { + Duplex = Duplex || __nccwpck_require__(91107); + options = options || {}; // Duplex streams are both readable and writable, but share + // the same options object. + // However, some cases require setting options to different + // values for the readable and the writable sides of the duplex stream. + // These options can be provided separately as readableXXX and writableXXX. + + if (typeof isDuplex !== 'boolean') isDuplex = stream instanceof Duplex; // object stream flag. 
Used to make read(n) ignore n and to + // make all the buffer merging and length checks go away + + this.objectMode = !!options.objectMode; + if (isDuplex) this.objectMode = this.objectMode || !!options.readableObjectMode; // the point at which it stops calling _read() to fill the buffer + // Note: 0 is a valid value, means "don't call _read preemptively ever" + + this.highWaterMark = getHighWaterMark(this, options, 'readableHighWaterMark', isDuplex); // A linked list is used to store data chunks instead of an array because the + // linked list can remove elements from the beginning faster than + // array.shift() + + this.buffer = new BufferList(); + this.length = 0; + this.pipes = null; + this.pipesCount = 0; + this.flowing = null; + this.ended = false; + this.endEmitted = false; + this.reading = false; // a flag to be able to tell if the event 'readable'/'data' is emitted + // immediately, or on a later tick. We set this to true at first, because + // any actions that shouldn't happen until "later" should generally also + // not happen before the first read call. + + this.sync = true; // whenever we return null, then we set a flag to say + // that we're awaiting a 'readable' event emission. + + this.needReadable = false; + this.emittedReadable = false; + this.readableListening = false; + this.resumeScheduled = false; + this.paused = true; // Should close be emitted on destroy. Defaults to true. + + this.emitClose = options.emitClose !== false; // Should .destroy() be called after 'end' (and potentially 'finish') + + this.autoDestroy = !!options.autoDestroy; // has it been destroyed + + this.destroyed = false; // Crypto is kind of old and crusty. Historically, its default string + // encoding is 'binary' so we have to make this configurable. + // Everything else in the universe uses 'utf8', though. 
+ + this.defaultEncoding = options.defaultEncoding || 'utf8'; // the number of writers that are awaiting a drain event in .pipe()s + + this.awaitDrain = 0; // if true, a maybeReadMore has been scheduled + + this.readingMore = false; + this.decoder = null; + this.encoding = null; + + if (options.encoding) { + if (!StringDecoder) StringDecoder = (__nccwpck_require__(5157)/* .StringDecoder */ .s); + this.decoder = new StringDecoder(options.encoding); + this.encoding = options.encoding; + } +} + +function Readable(options) { + Duplex = Duplex || __nccwpck_require__(91107); + if (!(this instanceof Readable)) return new Readable(options); // Checking for a Stream.Duplex instance is faster here instead of inside + // the ReadableState constructor, at least with V8 6.5 + + var isDuplex = this instanceof Duplex; + this._readableState = new ReadableState(options, this, isDuplex); // legacy + + this.readable = true; + + if (options) { + if (typeof options.read === 'function') this._read = options.read; + if (typeof options.destroy === 'function') this._destroy = options.destroy; + } + + Stream.call(this); +} + +Object.defineProperty(Readable.prototype, 'destroyed', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + if (this._readableState === undefined) { + return false; + } + + return this._readableState.destroyed; + }, + set: function set(value) { + // we ignore the value if the stream + // has not been initialized yet + if (!this._readableState) { + return; + } // backward compatibility, the user is explicitly + // managing destroyed + + + this._readableState.destroyed = value; + } +}); +Readable.prototype.destroy = destroyImpl.destroy; +Readable.prototype._undestroy = destroyImpl.undestroy; + +Readable.prototype._destroy = function (err, cb) { + cb(err); +}; // Manually shove something into the read() buffer. 
+// This returns true if the highWaterMark has not been hit yet, +// similar to how Writable.write() returns true if you should +// write() some more. + + +Readable.prototype.push = function (chunk, encoding) { + var state = this._readableState; + var skipChunkCheck; + + if (!state.objectMode) { + if (typeof chunk === 'string') { + encoding = encoding || state.defaultEncoding; + + if (encoding !== state.encoding) { + chunk = Buffer.from(chunk, encoding); + encoding = ''; + } + + skipChunkCheck = true; + } + } else { + skipChunkCheck = true; + } + + return readableAddChunk(this, chunk, encoding, false, skipChunkCheck); +}; // Unshift should *always* be something directly out of read() + + +Readable.prototype.unshift = function (chunk) { + return readableAddChunk(this, chunk, null, true, false); +}; + +function readableAddChunk(stream, chunk, encoding, addToFront, skipChunkCheck) { + debug('readableAddChunk', chunk); + var state = stream._readableState; + + if (chunk === null) { + state.reading = false; + onEofChunk(stream, state); + } else { + var er; + if (!skipChunkCheck) er = chunkInvalid(state, chunk); + + if (er) { + errorOrDestroy(stream, er); + } else if (state.objectMode || chunk && chunk.length > 0) { + if (typeof chunk !== 'string' && !state.objectMode && Object.getPrototypeOf(chunk) !== Buffer.prototype) { + chunk = _uint8ArrayToBuffer(chunk); + } + + if (addToFront) { + if (state.endEmitted) errorOrDestroy(stream, new ERR_STREAM_UNSHIFT_AFTER_END_EVENT());else addChunk(stream, state, chunk, true); + } else if (state.ended) { + errorOrDestroy(stream, new ERR_STREAM_PUSH_AFTER_EOF()); + } else if (state.destroyed) { + return false; + } else { + state.reading = false; + + if (state.decoder && !encoding) { + chunk = state.decoder.write(chunk); + if (state.objectMode || chunk.length !== 0) addChunk(stream, state, chunk, false);else maybeReadMore(stream, state); + } else { + addChunk(stream, state, chunk, false); + } + } + } else if (!addToFront) { + 
state.reading = false; + maybeReadMore(stream, state); + } + } // We can push more data if we are below the highWaterMark. + // Also, if we have no data yet, we can stand some more bytes. + // This is to work around cases where hwm=0, such as the repl. + + + return !state.ended && (state.length < state.highWaterMark || state.length === 0); +} + +function addChunk(stream, state, chunk, addToFront) { + if (state.flowing && state.length === 0 && !state.sync) { + state.awaitDrain = 0; + stream.emit('data', chunk); + } else { + // update the buffer info. + state.length += state.objectMode ? 1 : chunk.length; + if (addToFront) state.buffer.unshift(chunk);else state.buffer.push(chunk); + if (state.needReadable) emitReadable(stream); + } + + maybeReadMore(stream, state); +} + +function chunkInvalid(state, chunk) { + var er; + + if (!_isUint8Array(chunk) && typeof chunk !== 'string' && chunk !== undefined && !state.objectMode) { + er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer', 'Uint8Array'], chunk); + } + + return er; +} + +Readable.prototype.isPaused = function () { + return this._readableState.flowing === false; +}; // backwards compatibility. 
+ + +Readable.prototype.setEncoding = function (enc) { + if (!StringDecoder) StringDecoder = (__nccwpck_require__(5157)/* .StringDecoder */ .s); + var decoder = new StringDecoder(enc); + this._readableState.decoder = decoder; // If setEncoding(null), decoder.encoding equals utf8 + + this._readableState.encoding = this._readableState.decoder.encoding; // Iterate over current buffer to convert already stored Buffers: + + var p = this._readableState.buffer.head; + var content = ''; + + while (p !== null) { + content += decoder.write(p.data); + p = p.next; + } + + this._readableState.buffer.clear(); + + if (content !== '') this._readableState.buffer.push(content); + this._readableState.length = content.length; + return this; +}; // Don't raise the hwm > 1GB + + +var MAX_HWM = 0x40000000; + +function computeNewHighWaterMark(n) { + if (n >= MAX_HWM) { + // TODO(ronag): Throw ERR_VALUE_OUT_OF_RANGE. + n = MAX_HWM; + } else { + // Get the next highest power of 2 to prevent increasing hwm excessively in + // tiny amounts + n--; + n |= n >>> 1; + n |= n >>> 2; + n |= n >>> 4; + n |= n >>> 8; + n |= n >>> 16; + n++; + } + + return n; +} // This function is designed to be inlinable, so please take care when making +// changes to the function body. + + +function howMuchToRead(n, state) { + if (n <= 0 || state.length === 0 && state.ended) return 0; + if (state.objectMode) return 1; + + if (n !== n) { + // Only flow one buffer at a time + if (state.flowing && state.length) return state.buffer.head.data.length;else return state.length; + } // If we're asking for more than the current hwm, then raise the hwm. + + + if (n > state.highWaterMark) state.highWaterMark = computeNewHighWaterMark(n); + if (n <= state.length) return n; // Don't have enough + + if (!state.ended) { + state.needReadable = true; + return 0; + } + + return state.length; +} // you can override either this method, or the async _read(n) below. 
+ + +Readable.prototype.read = function (n) { + debug('read', n); + n = parseInt(n, 10); + var state = this._readableState; + var nOrig = n; + if (n !== 0) state.emittedReadable = false; // if we're doing read(0) to trigger a readable event, but we + // already have a bunch of data in the buffer, then just trigger + // the 'readable' event and move on. + + if (n === 0 && state.needReadable && ((state.highWaterMark !== 0 ? state.length >= state.highWaterMark : state.length > 0) || state.ended)) { + debug('read: emitReadable', state.length, state.ended); + if (state.length === 0 && state.ended) endReadable(this);else emitReadable(this); + return null; + } + + n = howMuchToRead(n, state); // if we've ended, and we're now clear, then finish it up. + + if (n === 0 && state.ended) { + if (state.length === 0) endReadable(this); + return null; + } // All the actual chunk generation logic needs to be + // *below* the call to _read. The reason is that in certain + // synthetic stream cases, such as passthrough streams, _read + // may be a completely synchronous operation which may change + // the state of the read buffer, providing enough data when + // before there was *not* enough. + // + // So, the steps are: + // 1. Figure out what the state of things will be after we do + // a read from the buffer. + // + // 2. If that resulting state will trigger a _read, then call _read. + // Note that this may be asynchronous, or synchronous. Yes, it is + // deeply ugly to write APIs this way, but that still doesn't mean + // that the Readable class should behave improperly, as streams are + // designed to be sync/async agnostic. + // Take note if the _read call is sync or async (ie, if the read call + // has returned yet), so that we know whether or not it's safe to emit + // 'readable' etc. + // + // 3. Actually pull the requested chunks out of the buffer and return. + // if we need a readable event, then we need to do some reading. 
+ + + var doRead = state.needReadable; + debug('need readable', doRead); // if we currently have less than the highWaterMark, then also read some + + if (state.length === 0 || state.length - n < state.highWaterMark) { + doRead = true; + debug('length less than watermark', doRead); + } // however, if we've ended, then there's no point, and if we're already + // reading, then it's unnecessary. + + + if (state.ended || state.reading) { + doRead = false; + debug('reading or ended', doRead); + } else if (doRead) { + debug('do read'); + state.reading = true; + state.sync = true; // if the length is currently zero, then we *need* a readable event. + + if (state.length === 0) state.needReadable = true; // call internal read method + + this._read(state.highWaterMark); + + state.sync = false; // If _read pushed data synchronously, then `reading` will be false, + // and we need to re-evaluate how much data we can return to the user. + + if (!state.reading) n = howMuchToRead(nOrig, state); + } + + var ret; + if (n > 0) ret = fromList(n, state);else ret = null; + + if (ret === null) { + state.needReadable = state.length <= state.highWaterMark; + n = 0; + } else { + state.length -= n; + state.awaitDrain = 0; + } + + if (state.length === 0) { + // If we have nothing in the buffer, then we want to know + // as soon as we *do* get something into the buffer. + if (!state.ended) state.needReadable = true; // If we tried to read() past the EOF, then emit end on the next tick. + + if (nOrig !== n && state.ended) endReadable(this); + } + + if (ret !== null) this.emit('data', ret); + return ret; +}; + +function onEofChunk(stream, state) { + debug('onEofChunk'); + if (state.ended) return; + + if (state.decoder) { + var chunk = state.decoder.end(); + + if (chunk && chunk.length) { + state.buffer.push(chunk); + state.length += state.objectMode ? 1 : chunk.length; + } + } + + state.ended = true; + + if (state.sync) { + // if we are sync, wait until next tick to emit the data. 
+ // Otherwise we risk emitting data in the flow() + // the readable code triggers during a read() call + emitReadable(stream); + } else { + // emit 'readable' now to make sure it gets picked up. + state.needReadable = false; + + if (!state.emittedReadable) { + state.emittedReadable = true; + emitReadable_(stream); + } + } +} // Don't emit readable right away in sync mode, because this can trigger +// another read() call => stack overflow. This way, it might trigger +// a nextTick recursion warning, but that's not so bad. + + +function emitReadable(stream) { + var state = stream._readableState; + debug('emitReadable', state.needReadable, state.emittedReadable); + state.needReadable = false; + + if (!state.emittedReadable) { + debug('emitReadable', state.flowing); + state.emittedReadable = true; + process.nextTick(emitReadable_, stream); + } +} + +function emitReadable_(stream) { + var state = stream._readableState; + debug('emitReadable_', state.destroyed, state.length, state.ended); + + if (!state.destroyed && (state.length || state.ended)) { + stream.emit('readable'); + state.emittedReadable = false; + } // The stream needs another readable event if + // 1. It is not flowing, as the flow mechanism will take + // care of it. + // 2. It is not ended. + // 3. It is below the highWaterMark, so we can schedule + // another readable later. + + + state.needReadable = !state.flowing && !state.ended && state.length <= state.highWaterMark; + flow(stream); +} // at this point, the user has presumably seen the 'readable' event, +// and called read() to consume some data. that may have triggered +// in turn another _read(n) call, in which case reading = true if +// it's in progress. +// However, if we're not ended, or reading, and the length < hwm, +// then go ahead and try to read some more preemptively. 
+ + +function maybeReadMore(stream, state) { + if (!state.readingMore) { + state.readingMore = true; + process.nextTick(maybeReadMore_, stream, state); + } +} + +function maybeReadMore_(stream, state) { + // Attempt to read more data if we should. + // + // The conditions for reading more data are (one of): + // - Not enough data buffered (state.length < state.highWaterMark). The loop + // is responsible for filling the buffer with enough data if such data + // is available. If highWaterMark is 0 and we are not in the flowing mode + // we should _not_ attempt to buffer any extra data. We'll get more data + // when the stream consumer calls read() instead. + // - No data in the buffer, and the stream is in flowing mode. In this mode + // the loop below is responsible for ensuring read() is called. Failing to + // call read here would abort the flow and there's no other mechanism for + // continuing the flow if the stream consumer has just subscribed to the + // 'data' event. + // + // In addition to the above conditions to keep reading data, the following + // conditions prevent the data from being read: + // - The stream has ended (state.ended). + // - There is already a pending 'read' operation (state.reading). This is a + // case where the the stream has called the implementation defined _read() + // method, but they are processing the call asynchronously and have _not_ + // called push() with new data. In this case we skip performing more + // read()s. The execution ends in this method again after the _read() ends + // up calling push() with more data. + while (!state.reading && !state.ended && (state.length < state.highWaterMark || state.flowing && state.length === 0)) { + var len = state.length; + debug('maybeReadMore read 0'); + stream.read(0); + if (len === state.length) // didn't get any data, stop spinning. + break; + } + + state.readingMore = false; +} // abstract method. to be overridden in specific implementation classes. 
+// call cb(er, data) where data is <= n in length. +// for virtual (non-string, non-buffer) streams, "length" is somewhat +// arbitrary, and perhaps not very meaningful. + + +Readable.prototype._read = function (n) { + errorOrDestroy(this, new ERR_METHOD_NOT_IMPLEMENTED('_read()')); +}; + +Readable.prototype.pipe = function (dest, pipeOpts) { + var src = this; + var state = this._readableState; + + switch (state.pipesCount) { + case 0: + state.pipes = dest; + break; + + case 1: + state.pipes = [state.pipes, dest]; + break; + + default: + state.pipes.push(dest); + break; + } + + state.pipesCount += 1; + debug('pipe count=%d opts=%j', state.pipesCount, pipeOpts); + var doEnd = (!pipeOpts || pipeOpts.end !== false) && dest !== process.stdout && dest !== process.stderr; + var endFn = doEnd ? onend : unpipe; + if (state.endEmitted) process.nextTick(endFn);else src.once('end', endFn); + dest.on('unpipe', onunpipe); + + function onunpipe(readable, unpipeInfo) { + debug('onunpipe'); + + if (readable === src) { + if (unpipeInfo && unpipeInfo.hasUnpiped === false) { + unpipeInfo.hasUnpiped = true; + cleanup(); + } + } + } + + function onend() { + debug('onend'); + dest.end(); + } // when the dest drains, it reduces the awaitDrain counter + // on the source. This would be more elegant with a .once() + // handler in flow(), but adding and removing repeatedly is + // too slow. 
+ + + var ondrain = pipeOnDrain(src); + dest.on('drain', ondrain); + var cleanedUp = false; + + function cleanup() { + debug('cleanup'); // cleanup event handlers once the pipe is broken + + dest.removeListener('close', onclose); + dest.removeListener('finish', onfinish); + dest.removeListener('drain', ondrain); + dest.removeListener('error', onerror); + dest.removeListener('unpipe', onunpipe); + src.removeListener('end', onend); + src.removeListener('end', unpipe); + src.removeListener('data', ondata); + cleanedUp = true; // if the reader is waiting for a drain event from this + // specific writer, then it would cause it to never start + // flowing again. + // So, if this is awaiting a drain, then we just call it now. + // If we don't know, then assume that we are waiting for one. + + if (state.awaitDrain && (!dest._writableState || dest._writableState.needDrain)) ondrain(); + } + + src.on('data', ondata); + + function ondata(chunk) { + debug('ondata'); + var ret = dest.write(chunk); + debug('dest.write', ret); + + if (ret === false) { + // If the user unpiped during `dest.write()`, it is possible + // to get stuck in a permanently paused state if that write + // also returned false. + // => Check whether `dest` is still a piping destination. + if ((state.pipesCount === 1 && state.pipes === dest || state.pipesCount > 1 && indexOf(state.pipes, dest) !== -1) && !cleanedUp) { + debug('false write response, pause', state.awaitDrain); + state.awaitDrain++; + } + + src.pause(); + } + } // if the dest has an error, then stop piping into it. + // however, don't suppress the throwing behavior for this. + + + function onerror(er) { + debug('onerror', er); + unpipe(); + dest.removeListener('error', onerror); + if (EElistenerCount(dest, 'error') === 0) errorOrDestroy(dest, er); + } // Make sure our error handler is attached before userland ones. + + + prependListener(dest, 'error', onerror); // Both close and finish should trigger unpipe, but only once. 
+ + function onclose() { + dest.removeListener('finish', onfinish); + unpipe(); + } + + dest.once('close', onclose); + + function onfinish() { + debug('onfinish'); + dest.removeListener('close', onclose); + unpipe(); + } + + dest.once('finish', onfinish); + + function unpipe() { + debug('unpipe'); + src.unpipe(dest); + } // tell the dest that it's being piped to + + + dest.emit('pipe', src); // start the flow if it hasn't been started already. + + if (!state.flowing) { + debug('pipe resume'); + src.resume(); + } + + return dest; +}; + +function pipeOnDrain(src) { + return function pipeOnDrainFunctionResult() { + var state = src._readableState; + debug('pipeOnDrain', state.awaitDrain); + if (state.awaitDrain) state.awaitDrain--; + + if (state.awaitDrain === 0 && EElistenerCount(src, 'data')) { + state.flowing = true; + flow(src); + } + }; +} + +Readable.prototype.unpipe = function (dest) { + var state = this._readableState; + var unpipeInfo = { + hasUnpiped: false + }; // if we're not piping anywhere, then do nothing. + + if (state.pipesCount === 0) return this; // just one destination. most common case. + + if (state.pipesCount === 1) { + // passed in one, but it's not the right one. + if (dest && dest !== state.pipes) return this; + if (!dest) dest = state.pipes; // got a match. + + state.pipes = null; + state.pipesCount = 0; + state.flowing = false; + if (dest) dest.emit('unpipe', this, unpipeInfo); + return this; + } // slow case. multiple pipe destinations. + + + if (!dest) { + // remove all. + var dests = state.pipes; + var len = state.pipesCount; + state.pipes = null; + state.pipesCount = 0; + state.flowing = false; + + for (var i = 0; i < len; i++) { + dests[i].emit('unpipe', this, { + hasUnpiped: false + }); + } + + return this; + } // try to find the right one. 
+ + + var index = indexOf(state.pipes, dest); + if (index === -1) return this; + state.pipes.splice(index, 1); + state.pipesCount -= 1; + if (state.pipesCount === 1) state.pipes = state.pipes[0]; + dest.emit('unpipe', this, unpipeInfo); + return this; +}; // set up data events if they are asked for +// Ensure readable listeners eventually get something + + +Readable.prototype.on = function (ev, fn) { + var res = Stream.prototype.on.call(this, ev, fn); + var state = this._readableState; + + if (ev === 'data') { + // update readableListening so that resume() may be a no-op + // a few lines down. This is needed to support once('readable'). + state.readableListening = this.listenerCount('readable') > 0; // Try start flowing on next tick if stream isn't explicitly paused + + if (state.flowing !== false) this.resume(); + } else if (ev === 'readable') { + if (!state.endEmitted && !state.readableListening) { + state.readableListening = state.needReadable = true; + state.flowing = false; + state.emittedReadable = false; + debug('on readable', state.length, state.reading); + + if (state.length) { + emitReadable(this); + } else if (!state.reading) { + process.nextTick(nReadingNextTick, this); + } + } + } + + return res; +}; + +Readable.prototype.addListener = Readable.prototype.on; + +Readable.prototype.removeListener = function (ev, fn) { + var res = Stream.prototype.removeListener.call(this, ev, fn); + + if (ev === 'readable') { + // We need to check if there is someone still listening to + // readable and reset the state. However this needs to happen + // after readable has been emitted but before I/O (nextTick) to + // support once('readable', fn) cycles. This means that calling + // resume within the same tick will have no + // effect. 
+ process.nextTick(updateReadableListening, this); + } + + return res; +}; + +Readable.prototype.removeAllListeners = function (ev) { + var res = Stream.prototype.removeAllListeners.apply(this, arguments); + + if (ev === 'readable' || ev === undefined) { + // We need to check if there is someone still listening to + // readable and reset the state. However this needs to happen + // after readable has been emitted but before I/O (nextTick) to + // support once('readable', fn) cycles. This means that calling + // resume within the same tick will have no + // effect. + process.nextTick(updateReadableListening, this); + } + + return res; +}; + +function updateReadableListening(self) { + var state = self._readableState; + state.readableListening = self.listenerCount('readable') > 0; + + if (state.resumeScheduled && !state.paused) { + // flowing needs to be set to true now, otherwise + // the upcoming resume will not flow. + state.flowing = true; // crude way to check if we should resume + } else if (self.listenerCount('data') > 0) { + self.resume(); + } +} + +function nReadingNextTick(self) { + debug('readable nexttick read 0'); + self.read(0); +} // pause() and resume() are remnants of the legacy readable stream API +// If the user uses them, then switch into old mode. 
+ + +Readable.prototype.resume = function () { + var state = this._readableState; + + if (!state.flowing) { + debug('resume'); // we flow only if there is no one listening + // for readable, but we still have to call + // resume() + + state.flowing = !state.readableListening; + resume(this, state); + } + + state.paused = false; + return this; +}; + +function resume(stream, state) { + if (!state.resumeScheduled) { + state.resumeScheduled = true; + process.nextTick(resume_, stream, state); + } +} + +function resume_(stream, state) { + debug('resume', state.reading); + + if (!state.reading) { + stream.read(0); + } + + state.resumeScheduled = false; + stream.emit('resume'); + flow(stream); + if (state.flowing && !state.reading) stream.read(0); +} + +Readable.prototype.pause = function () { + debug('call pause flowing=%j', this._readableState.flowing); + + if (this._readableState.flowing !== false) { + debug('pause'); + this._readableState.flowing = false; + this.emit('pause'); + } + + this._readableState.paused = true; + return this; +}; + +function flow(stream) { + var state = stream._readableState; + debug('flow', state.flowing); + + while (state.flowing && stream.read() !== null) { + ; + } +} // wrap an old-style stream as the async data source. +// This is *not* part of the readable stream interface. +// It is an ugly unfortunate mess of history. 
+ + +Readable.prototype.wrap = function (stream) { + var _this = this; + + var state = this._readableState; + var paused = false; + stream.on('end', function () { + debug('wrapped end'); + + if (state.decoder && !state.ended) { + var chunk = state.decoder.end(); + if (chunk && chunk.length) _this.push(chunk); + } + + _this.push(null); + }); + stream.on('data', function (chunk) { + debug('wrapped data'); + if (state.decoder) chunk = state.decoder.write(chunk); // don't skip over falsy values in objectMode + + if (state.objectMode && (chunk === null || chunk === undefined)) return;else if (!state.objectMode && (!chunk || !chunk.length)) return; + + var ret = _this.push(chunk); + + if (!ret) { + paused = true; + stream.pause(); + } + }); // proxy all the other methods. + // important when wrapping filters and duplexes. + + for (var i in stream) { + if (this[i] === undefined && typeof stream[i] === 'function') { + this[i] = function methodWrap(method) { + return function methodWrapReturnFunction() { + return stream[method].apply(stream, arguments); + }; + }(i); + } + } // proxy certain important events. + + + for (var n = 0; n < kProxyEvents.length; n++) { + stream.on(kProxyEvents[n], this.emit.bind(this, kProxyEvents[n])); + } // when we try to consume some more bytes, simply unpause the + // underlying stream. 
+ + + this._read = function (n) { + debug('wrapped _read', n); + + if (paused) { + paused = false; + stream.resume(); + } + }; + + return this; +}; + +if (typeof Symbol === 'function') { + Readable.prototype[Symbol.asyncIterator] = function () { + if (createReadableStreamAsyncIterator === undefined) { + createReadableStreamAsyncIterator = __nccwpck_require__(97709); + } + + return createReadableStreamAsyncIterator(this); + }; +} + +Object.defineProperty(Readable.prototype, 'readableHighWaterMark', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._readableState.highWaterMark; + } +}); +Object.defineProperty(Readable.prototype, 'readableBuffer', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._readableState && this._readableState.buffer; + } +}); +Object.defineProperty(Readable.prototype, 'readableFlowing', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._readableState.flowing; + }, + set: function set(state) { + if (this._readableState) { + this._readableState.flowing = state; + } + } +}); // exposed for testing purposes only. + +Readable._fromList = fromList; +Object.defineProperty(Readable.prototype, 'readableLength', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._readableState.length; + } +}); // Pluck off n bytes from an array of buffers. +// Length is the combined lengths of all the buffers in the list. 
+// This function is designed to be inlinable, so please take care when making +// changes to the function body. + +function fromList(n, state) { + // nothing buffered + if (state.length === 0) return null; + var ret; + if (state.objectMode) ret = state.buffer.shift();else if (!n || n >= state.length) { + // read it all, truncate the list + if (state.decoder) ret = state.buffer.join('');else if (state.buffer.length === 1) ret = state.buffer.first();else ret = state.buffer.concat(state.length); + state.buffer.clear(); + } else { + // read part of list + ret = state.buffer.consume(n, state.decoder); + } + return ret; +} + +function endReadable(stream) { + var state = stream._readableState; + debug('endReadable', state.endEmitted); + + if (!state.endEmitted) { + state.ended = true; + process.nextTick(endReadableNT, state, stream); + } +} + +function endReadableNT(state, stream) { + debug('endReadableNT', state.endEmitted, state.length); // Check that we didn't get one last unshift. + + if (!state.endEmitted && state.length === 0) { + state.endEmitted = true; + stream.readable = false; + stream.emit('end'); + + if (state.autoDestroy) { + // In case of duplex streams we need a way to detect + // if the writable side is ready for autoDestroy as well + var wState = stream._writableState; + + if (!wState || wState.autoDestroy && wState.finished) { + stream.destroy(); + } + } + } +} + +if (typeof Symbol === 'function') { + Readable.from = function (iterable, opts) { + if (from === undefined) { + from = __nccwpck_require__(4996); + } + + return from(Readable, iterable, opts); + }; +} + +function indexOf(xs, x) { + for (var i = 0, l = xs.length; i < l; i++) { + if (xs[i] === x) return i; + } + + return -1; +} + +/***/ }), + +/***/ 38469: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; +// Copyright Joyent, Inc. and other Node contributors. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. +// a transform stream is a readable/writable stream where you do +// something with the data. Sometimes it's called a "filter", +// but that's not a great name for it, since that implies a thing where +// some bits pass through, and others are simply ignored. (That would +// be a valid example of a transform, of course.) +// +// While the output is causally related to the input, it's not a +// necessarily symmetric or synchronous transformation. For example, +// a zlib stream might take multiple plain-text writes(), and then +// emit a single compressed chunk some time in the future. +// +// Here's how this works: +// +// The Transform stream has all the aspects of the readable and writable +// stream classes. When you write(chunk), that calls _write(chunk,cb) +// internally, and returns false if there's a lot of pending writes +// buffered up. 
When you call read(), that calls _read(n) until +// there's enough pending readable data buffered up. +// +// In a transform stream, the written data is placed in a buffer. When +// _read(n) is called, it transforms the queued up data, calling the +// buffered _write cb's as it consumes chunks. If consuming a single +// written chunk would result in multiple output chunks, then the first +// outputted bit calls the readcb, and subsequent chunks just go into +// the read buffer, and will cause it to emit 'readable' if necessary. +// +// This way, back-pressure is actually determined by the reading side, +// since _read has to be called to start processing a new chunk. However, +// a pathological inflate type of transform can cause excessive buffering +// here. For example, imagine a stream where every byte of input is +// interpreted as an integer from 0-255, and then results in that many +// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in +// 1kb of data being output. In this case, you could write a very small +// amount of input, and end up with a very large amount of output. In +// such a pathological inflating mechanism, there'd be no way to tell +// the system to stop doing the transform. A single 4MB write could +// cause the system to run out of memory. +// +// However, even in such a pathological case, only a single written chunk +// would be consumed, and then the rest would wait (un-transformed) until +// the results of the previous transformed chunk were consumed. 
+ + +module.exports = Transform; + +var _require$codes = (__nccwpck_require__(7190)/* .codes */ .q), + ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED, + ERR_MULTIPLE_CALLBACK = _require$codes.ERR_MULTIPLE_CALLBACK, + ERR_TRANSFORM_ALREADY_TRANSFORMING = _require$codes.ERR_TRANSFORM_ALREADY_TRANSFORMING, + ERR_TRANSFORM_WITH_LENGTH_0 = _require$codes.ERR_TRANSFORM_WITH_LENGTH_0; + +var Duplex = __nccwpck_require__(91107); + +__nccwpck_require__(79557)(Transform, Duplex); + +function afterTransform(er, data) { + var ts = this._transformState; + ts.transforming = false; + var cb = ts.writecb; + + if (cb === null) { + return this.emit('error', new ERR_MULTIPLE_CALLBACK()); + } + + ts.writechunk = null; + ts.writecb = null; + if (data != null) // single equals check for both `null` and `undefined` + this.push(data); + cb(er); + var rs = this._readableState; + rs.reading = false; + + if (rs.needReadable || rs.length < rs.highWaterMark) { + this._read(rs.highWaterMark); + } +} + +function Transform(options) { + if (!(this instanceof Transform)) return new Transform(options); + Duplex.call(this, options); + this._transformState = { + afterTransform: afterTransform.bind(this), + needTransform: false, + transforming: false, + writecb: null, + writechunk: null, + writeencoding: null + }; // start out asking for a readable event once data is transformed. + + this._readableState.needReadable = true; // we have implemented the _read method, and done the other things + // that Readable wants before the first _read call, so unset the + // sync guard flag. + + this._readableState.sync = false; + + if (options) { + if (typeof options.transform === 'function') this._transform = options.transform; + if (typeof options.flush === 'function') this._flush = options.flush; + } // When the writable side finishes, then flush out anything remaining. 
+ + + this.on('prefinish', prefinish); +} + +function prefinish() { + var _this = this; + + if (typeof this._flush === 'function' && !this._readableState.destroyed) { + this._flush(function (er, data) { + done(_this, er, data); + }); + } else { + done(this, null, null); + } +} + +Transform.prototype.push = function (chunk, encoding) { + this._transformState.needTransform = false; + return Duplex.prototype.push.call(this, chunk, encoding); +}; // This is the part where you do stuff! +// override this function in implementation classes. +// 'chunk' is an input chunk. +// +// Call `push(newChunk)` to pass along transformed output +// to the readable side. You may call 'push' zero or more times. +// +// Call `cb(err)` when you are done with this chunk. If you pass +// an error, then that'll put the hurt on the whole operation. If you +// never call cb(), then you'll never get another chunk. + + +Transform.prototype._transform = function (chunk, encoding, cb) { + cb(new ERR_METHOD_NOT_IMPLEMENTED('_transform()')); +}; + +Transform.prototype._write = function (chunk, encoding, cb) { + var ts = this._transformState; + ts.writecb = cb; + ts.writechunk = chunk; + ts.writeencoding = encoding; + + if (!ts.transforming) { + var rs = this._readableState; + if (ts.needTransform || rs.needReadable || rs.length < rs.highWaterMark) this._read(rs.highWaterMark); + } +}; // Doesn't matter what the args are here. +// _transform does all the work. +// That we got here means that the readable side wants more data. + + +Transform.prototype._read = function (n) { + var ts = this._transformState; + + if (ts.writechunk !== null && !ts.transforming) { + ts.transforming = true; + + this._transform(ts.writechunk, ts.writeencoding, ts.afterTransform); + } else { + // mark that we need a transform, so that any data that comes in + // will get processed, now that we've asked for it. 
+ ts.needTransform = true; + } +}; + +Transform.prototype._destroy = function (err, cb) { + Duplex.prototype._destroy.call(this, err, function (err2) { + cb(err2); + }); +}; + +function done(stream, er, data) { + if (er) return stream.emit('error', er); + if (data != null) // single equals check for both `null` and `undefined` + stream.push(data); // TODO(BridgeAR): Write a test for these two error cases + // if there's nothing in the write buffer, then that means + // that nothing more will ever be provided + + if (stream._writableState.length) throw new ERR_TRANSFORM_WITH_LENGTH_0(); + if (stream._transformState.transforming) throw new ERR_TRANSFORM_ALREADY_TRANSFORMING(); + return stream.push(null); +} + +/***/ }), + +/***/ 56573: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. 
+// A bit simpler than readable streams. +// Implement an async ._write(chunk, encoding, cb), and it'll handle all +// the drain event emission and buffering. + + +module.exports = Writable; +/* */ + +function WriteReq(chunk, encoding, cb) { + this.chunk = chunk; + this.encoding = encoding; + this.callback = cb; + this.next = null; +} // It seems a linked list but it is not +// there will be only 2 of these for each stream + + +function CorkedRequest(state) { + var _this = this; + + this.next = null; + this.entry = null; + + this.finish = function () { + onCorkedFinish(_this, state); + }; +} +/* */ + +/**/ + + +var Duplex; +/**/ + +Writable.WritableState = WritableState; +/**/ + +var internalUtil = { + deprecate: __nccwpck_require__(70620) +}; +/**/ + +/**/ + +var Stream = __nccwpck_require__(49995); +/**/ + + +var Buffer = (__nccwpck_require__(14300).Buffer); + +var OurUint8Array = global.Uint8Array || function () {}; + +function _uint8ArrayToBuffer(chunk) { + return Buffer.from(chunk); +} + +function _isUint8Array(obj) { + return Buffer.isBuffer(obj) || obj instanceof OurUint8Array; +} + +var destroyImpl = __nccwpck_require__(33844); + +var _require = __nccwpck_require__(95408), + getHighWaterMark = _require.getHighWaterMark; + +var _require$codes = (__nccwpck_require__(7190)/* .codes */ .q), + ERR_INVALID_ARG_TYPE = _require$codes.ERR_INVALID_ARG_TYPE, + ERR_METHOD_NOT_IMPLEMENTED = _require$codes.ERR_METHOD_NOT_IMPLEMENTED, + ERR_MULTIPLE_CALLBACK = _require$codes.ERR_MULTIPLE_CALLBACK, + ERR_STREAM_CANNOT_PIPE = _require$codes.ERR_STREAM_CANNOT_PIPE, + ERR_STREAM_DESTROYED = _require$codes.ERR_STREAM_DESTROYED, + ERR_STREAM_NULL_VALUES = _require$codes.ERR_STREAM_NULL_VALUES, + ERR_STREAM_WRITE_AFTER_END = _require$codes.ERR_STREAM_WRITE_AFTER_END, + ERR_UNKNOWN_ENCODING = _require$codes.ERR_UNKNOWN_ENCODING; + +var errorOrDestroy = destroyImpl.errorOrDestroy; + +__nccwpck_require__(79557)(Writable, Stream); + +function nop() {} + +function 
WritableState(options, stream, isDuplex) { + Duplex = Duplex || __nccwpck_require__(91107); + options = options || {}; // Duplex streams are both readable and writable, but share + // the same options object. + // However, some cases require setting options to different + // values for the readable and the writable sides of the duplex stream, + // e.g. options.readableObjectMode vs. options.writableObjectMode, etc. + + if (typeof isDuplex !== 'boolean') isDuplex = stream instanceof Duplex; // object stream flag to indicate whether or not this stream + // contains buffers or objects. + + this.objectMode = !!options.objectMode; + if (isDuplex) this.objectMode = this.objectMode || !!options.writableObjectMode; // the point at which write() starts returning false + // Note: 0 is a valid value, means that we always return false if + // the entire buffer is not flushed immediately on write() + + this.highWaterMark = getHighWaterMark(this, options, 'writableHighWaterMark', isDuplex); // if _final has been called + + this.finalCalled = false; // drain event flag. + + this.needDrain = false; // at the start of calling end() + + this.ending = false; // when end() has been called, and returned + + this.ended = false; // when 'finish' is emitted + + this.finished = false; // has it been destroyed + + this.destroyed = false; // should we decode strings into buffers before passing to _write? + // this is here so that some node-core streams can optimize string + // handling at a lower level. + + var noDecode = options.decodeStrings === false; + this.decodeStrings = !noDecode; // Crypto is kind of old and crusty. Historically, its default string + // encoding is 'binary' so we have to make this configurable. + // Everything else in the universe uses 'utf8', though. + + this.defaultEncoding = options.defaultEncoding || 'utf8'; // not an actual buffer we keep track of, but a measurement + // of how much we're waiting to get pushed to some underlying + // socket or file. 
+ + this.length = 0; // a flag to see when we're in the middle of a write. + + this.writing = false; // when true all writes will be buffered until .uncork() call + + this.corked = 0; // a flag to be able to tell if the onwrite cb is called immediately, + // or on a later tick. We set this to true at first, because any + // actions that shouldn't happen until "later" should generally also + // not happen before the first write call. + + this.sync = true; // a flag to know if we're processing previously buffered items, which + // may call the _write() callback in the same tick, so that we don't + // end up in an overlapped onwrite situation. + + this.bufferProcessing = false; // the callback that's passed to _write(chunk,cb) + + this.onwrite = function (er) { + onwrite(stream, er); + }; // the callback that the user supplies to write(chunk,encoding,cb) + + + this.writecb = null; // the amount that is being written when _write is called. + + this.writelen = 0; + this.bufferedRequest = null; + this.lastBufferedRequest = null; // number of pending user-supplied write callbacks + // this must be 0 before 'finish' can be emitted + + this.pendingcb = 0; // emit prefinish if the only thing we're waiting for is _write cbs + // This is relevant for synchronous Transform streams + + this.prefinished = false; // True if the error was already emitted and should not be thrown again + + this.errorEmitted = false; // Should close be emitted on destroy. Defaults to true. 
+ + this.emitClose = options.emitClose !== false; // Should .destroy() be called after 'finish' (and potentially 'end') + + this.autoDestroy = !!options.autoDestroy; // count buffered requests + + this.bufferedRequestCount = 0; // allocate the first CorkedRequest, there is always + // one allocated and free to use, and we maintain at most two + + this.corkedRequestsFree = new CorkedRequest(this); +} + +WritableState.prototype.getBuffer = function getBuffer() { + var current = this.bufferedRequest; + var out = []; + + while (current) { + out.push(current); + current = current.next; + } + + return out; +}; + +(function () { + try { + Object.defineProperty(WritableState.prototype, 'buffer', { + get: internalUtil.deprecate(function writableStateBufferGetter() { + return this.getBuffer(); + }, '_writableState.buffer is deprecated. Use _writableState.getBuffer ' + 'instead.', 'DEP0003') + }); + } catch (_) {} +})(); // Test _writableState for inheritance to account for Duplex streams, +// whose prototype chain only points to Readable. + + +var realHasInstance; + +if (typeof Symbol === 'function' && Symbol.hasInstance && typeof Function.prototype[Symbol.hasInstance] === 'function') { + realHasInstance = Function.prototype[Symbol.hasInstance]; + Object.defineProperty(Writable, Symbol.hasInstance, { + value: function value(object) { + if (realHasInstance.call(this, object)) return true; + if (this !== Writable) return false; + return object && object._writableState instanceof WritableState; + } + }); +} else { + realHasInstance = function realHasInstance(object) { + return object instanceof this; + }; +} + +function Writable(options) { + Duplex = Duplex || __nccwpck_require__(91107); // Writable ctor is applied to Duplexes, too. + // `realHasInstance` is necessary because using plain `instanceof` + // would return false, as no `_writableState` property is attached. 
+ // Trying to use the custom `instanceof` for Writable here will also break the + // Node.js LazyTransform implementation, which has a non-trivial getter for + // `_writableState` that would lead to infinite recursion. + // Checking for a Stream.Duplex instance is faster here instead of inside + // the WritableState constructor, at least with V8 6.5 + + var isDuplex = this instanceof Duplex; + if (!isDuplex && !realHasInstance.call(Writable, this)) return new Writable(options); + this._writableState = new WritableState(options, this, isDuplex); // legacy. + + this.writable = true; + + if (options) { + if (typeof options.write === 'function') this._write = options.write; + if (typeof options.writev === 'function') this._writev = options.writev; + if (typeof options.destroy === 'function') this._destroy = options.destroy; + if (typeof options.final === 'function') this._final = options.final; + } + + Stream.call(this); +} // Otherwise people can pipe Writable streams, which is just wrong. + + +Writable.prototype.pipe = function () { + errorOrDestroy(this, new ERR_STREAM_CANNOT_PIPE()); +}; + +function writeAfterEnd(stream, cb) { + var er = new ERR_STREAM_WRITE_AFTER_END(); // TODO: defer error events consistently everywhere, not just the cb + + errorOrDestroy(stream, er); + process.nextTick(cb, er); +} // Checks that a user-supplied chunk is valid, especially for the particular +// mode the stream is in. Currently this means that `null` is never accepted +// and undefined/non-string values are only allowed in object mode. 
+ + +function validChunk(stream, state, chunk, cb) { + var er; + + if (chunk === null) { + er = new ERR_STREAM_NULL_VALUES(); + } else if (typeof chunk !== 'string' && !state.objectMode) { + er = new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Buffer'], chunk); + } + + if (er) { + errorOrDestroy(stream, er); + process.nextTick(cb, er); + return false; + } + + return true; +} + +Writable.prototype.write = function (chunk, encoding, cb) { + var state = this._writableState; + var ret = false; + + var isBuf = !state.objectMode && _isUint8Array(chunk); + + if (isBuf && !Buffer.isBuffer(chunk)) { + chunk = _uint8ArrayToBuffer(chunk); + } + + if (typeof encoding === 'function') { + cb = encoding; + encoding = null; + } + + if (isBuf) encoding = 'buffer';else if (!encoding) encoding = state.defaultEncoding; + if (typeof cb !== 'function') cb = nop; + if (state.ending) writeAfterEnd(this, cb);else if (isBuf || validChunk(this, state, chunk, cb)) { + state.pendingcb++; + ret = writeOrBuffer(this, state, isBuf, chunk, encoding, cb); + } + return ret; +}; + +Writable.prototype.cork = function () { + this._writableState.corked++; +}; + +Writable.prototype.uncork = function () { + var state = this._writableState; + + if (state.corked) { + state.corked--; + if (!state.writing && !state.corked && !state.bufferProcessing && state.bufferedRequest) clearBuffer(this, state); + } +}; + +Writable.prototype.setDefaultEncoding = function setDefaultEncoding(encoding) { + // node::ParseEncoding() requires lower case. 
+ if (typeof encoding === 'string') encoding = encoding.toLowerCase(); + if (!(['hex', 'utf8', 'utf-8', 'ascii', 'binary', 'base64', 'ucs2', 'ucs-2', 'utf16le', 'utf-16le', 'raw'].indexOf((encoding + '').toLowerCase()) > -1)) throw new ERR_UNKNOWN_ENCODING(encoding); + this._writableState.defaultEncoding = encoding; + return this; +}; + +Object.defineProperty(Writable.prototype, 'writableBuffer', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._writableState && this._writableState.getBuffer(); + } +}); + +function decodeChunk(state, chunk, encoding) { + if (!state.objectMode && state.decodeStrings !== false && typeof chunk === 'string') { + chunk = Buffer.from(chunk, encoding); + } + + return chunk; +} + +Object.defineProperty(Writable.prototype, 'writableHighWaterMark', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._writableState.highWaterMark; + } +}); // if we're already writing something, then just put this +// in the queue, and wait our turn. Otherwise, call _write +// If we return false, then we need a drain event, so set that flag. + +function writeOrBuffer(stream, state, isBuf, chunk, encoding, cb) { + if (!isBuf) { + var newChunk = decodeChunk(state, chunk, encoding); + + if (chunk !== newChunk) { + isBuf = true; + encoding = 'buffer'; + chunk = newChunk; + } + } + + var len = state.objectMode ? 1 : chunk.length; + state.length += len; + var ret = state.length < state.highWaterMark; // we must ensure that previous needDrain will not be reset to false. 
+ + if (!ret) state.needDrain = true; + + if (state.writing || state.corked) { + var last = state.lastBufferedRequest; + state.lastBufferedRequest = { + chunk: chunk, + encoding: encoding, + isBuf: isBuf, + callback: cb, + next: null + }; + + if (last) { + last.next = state.lastBufferedRequest; + } else { + state.bufferedRequest = state.lastBufferedRequest; + } + + state.bufferedRequestCount += 1; + } else { + doWrite(stream, state, false, len, chunk, encoding, cb); + } + + return ret; +} + +function doWrite(stream, state, writev, len, chunk, encoding, cb) { + state.writelen = len; + state.writecb = cb; + state.writing = true; + state.sync = true; + if (state.destroyed) state.onwrite(new ERR_STREAM_DESTROYED('write'));else if (writev) stream._writev(chunk, state.onwrite);else stream._write(chunk, encoding, state.onwrite); + state.sync = false; +} + +function onwriteError(stream, state, sync, er, cb) { + --state.pendingcb; + + if (sync) { + // defer the callback if we are being called synchronously + // to avoid piling up things on the stack + process.nextTick(cb, er); // this can emit finish, and it will always happen + // after error + + process.nextTick(finishMaybe, stream, state); + stream._writableState.errorEmitted = true; + errorOrDestroy(stream, er); + } else { + // the caller expect this to happen before if + // it is async + cb(er); + stream._writableState.errorEmitted = true; + errorOrDestroy(stream, er); // this can emit finish, but finish must + // always follow error + + finishMaybe(stream, state); + } +} + +function onwriteStateUpdate(state) { + state.writing = false; + state.writecb = null; + state.length -= state.writelen; + state.writelen = 0; +} + +function onwrite(stream, er) { + var state = stream._writableState; + var sync = state.sync; + var cb = state.writecb; + if (typeof cb !== 'function') throw new ERR_MULTIPLE_CALLBACK(); + onwriteStateUpdate(state); + if (er) onwriteError(stream, state, sync, er, cb);else { + // Check if we're actually 
ready to finish, but don't emit yet + var finished = needFinish(state) || stream.destroyed; + + if (!finished && !state.corked && !state.bufferProcessing && state.bufferedRequest) { + clearBuffer(stream, state); + } + + if (sync) { + process.nextTick(afterWrite, stream, state, finished, cb); + } else { + afterWrite(stream, state, finished, cb); + } + } +} + +function afterWrite(stream, state, finished, cb) { + if (!finished) onwriteDrain(stream, state); + state.pendingcb--; + cb(); + finishMaybe(stream, state); +} // Must force callback to be called on nextTick, so that we don't +// emit 'drain' before the write() consumer gets the 'false' return +// value, and has a chance to attach a 'drain' listener. + + +function onwriteDrain(stream, state) { + if (state.length === 0 && state.needDrain) { + state.needDrain = false; + stream.emit('drain'); + } +} // if there's something in the buffer waiting, then process it + + +function clearBuffer(stream, state) { + state.bufferProcessing = true; + var entry = state.bufferedRequest; + + if (stream._writev && entry && entry.next) { + // Fast case, write everything using _writev() + var l = state.bufferedRequestCount; + var buffer = new Array(l); + var holder = state.corkedRequestsFree; + holder.entry = entry; + var count = 0; + var allBuffers = true; + + while (entry) { + buffer[count] = entry; + if (!entry.isBuf) allBuffers = false; + entry = entry.next; + count += 1; + } + + buffer.allBuffers = allBuffers; + doWrite(stream, state, true, state.length, buffer, '', holder.finish); // doWrite is almost always async, defer these to save a bit of time + // as the hot path ends with doWrite + + state.pendingcb++; + state.lastBufferedRequest = null; + + if (holder.next) { + state.corkedRequestsFree = holder.next; + holder.next = null; + } else { + state.corkedRequestsFree = new CorkedRequest(state); + } + + state.bufferedRequestCount = 0; + } else { + // Slow case, write chunks one-by-one + while (entry) { + var chunk = entry.chunk; 
+ var encoding = entry.encoding; + var cb = entry.callback; + var len = state.objectMode ? 1 : chunk.length; + doWrite(stream, state, false, len, chunk, encoding, cb); + entry = entry.next; + state.bufferedRequestCount--; // if we didn't call the onwrite immediately, then + // it means that we need to wait until it does. + // also, that means that the chunk and cb are currently + // being processed, so move the buffer counter past them. + + if (state.writing) { + break; + } + } + + if (entry === null) state.lastBufferedRequest = null; + } + + state.bufferedRequest = entry; + state.bufferProcessing = false; +} + +Writable.prototype._write = function (chunk, encoding, cb) { + cb(new ERR_METHOD_NOT_IMPLEMENTED('_write()')); +}; + +Writable.prototype._writev = null; + +Writable.prototype.end = function (chunk, encoding, cb) { + var state = this._writableState; + + if (typeof chunk === 'function') { + cb = chunk; + chunk = null; + encoding = null; + } else if (typeof encoding === 'function') { + cb = encoding; + encoding = null; + } + + if (chunk !== null && chunk !== undefined) this.write(chunk, encoding); // .end() fully uncorks + + if (state.corked) { + state.corked = 1; + this.uncork(); + } // ignore unnecessary end() calls. 
+ + + if (!state.ending) endWritable(this, state, cb); + return this; +}; + +Object.defineProperty(Writable.prototype, 'writableLength', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + return this._writableState.length; + } +}); + +function needFinish(state) { + return state.ending && state.length === 0 && state.bufferedRequest === null && !state.finished && !state.writing; +} + +function callFinal(stream, state) { + stream._final(function (err) { + state.pendingcb--; + + if (err) { + errorOrDestroy(stream, err); + } + + state.prefinished = true; + stream.emit('prefinish'); + finishMaybe(stream, state); + }); +} + +function prefinish(stream, state) { + if (!state.prefinished && !state.finalCalled) { + if (typeof stream._final === 'function' && !state.destroyed) { + state.pendingcb++; + state.finalCalled = true; + process.nextTick(callFinal, stream, state); + } else { + state.prefinished = true; + stream.emit('prefinish'); + } + } +} + +function finishMaybe(stream, state) { + var need = needFinish(state); + + if (need) { + prefinish(stream, state); + + if (state.pendingcb === 0) { + state.finished = true; + stream.emit('finish'); + + if (state.autoDestroy) { + // In case of duplex streams we need a way to detect + // if the readable side is ready for autoDestroy as well + var rState = stream._readableState; + + if (!rState || rState.autoDestroy && rState.endEmitted) { + stream.destroy(); + } + } + } + } + + return need; +} + +function endWritable(stream, state, cb) { + state.ending = true; + finishMaybe(stream, state); + + if (cb) { + if (state.finished) process.nextTick(cb);else stream.once('finish', cb); + } + + state.ended = true; + stream.writable = false; +} + +function onCorkedFinish(corkReq, state, err) { + var entry = corkReq.entry; + corkReq.entry = null; + + while (entry) { + var cb = entry.callback; + state.pendingcb--; + 
cb(err); + entry = entry.next; + } // reuse the free corkReq. + + + state.corkedRequestsFree.next = corkReq; +} + +Object.defineProperty(Writable.prototype, 'destroyed', { + // making it explicit this property is not enumerable + // because otherwise some prototype manipulation in + // userland will fail + enumerable: false, + get: function get() { + if (this._writableState === undefined) { + return false; + } + + return this._writableState.destroyed; + }, + set: function set(value) { + // we ignore the value if the stream + // has not been initialized yet + if (!this._writableState) { + return; + } // backward compatibility, the user is explicitly + // managing destroyed + + + this._writableState.destroyed = value; + } +}); +Writable.prototype.destroy = destroyImpl.destroy; +Writable.prototype._undestroy = destroyImpl.undestroy; + +Writable.prototype._destroy = function (err, cb) { + cb(err); +}; + +/***/ }), + +/***/ 97709: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; + + +var _Object$setPrototypeO; + +function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; } + +var finished = __nccwpck_require__(60193); + +var kLastResolve = Symbol('lastResolve'); +var kLastReject = Symbol('lastReject'); +var kError = Symbol('error'); +var kEnded = Symbol('ended'); +var kLastPromise = Symbol('lastPromise'); +var kHandlePromise = Symbol('handlePromise'); +var kStream = Symbol('stream'); + +function createIterResult(value, done) { + return { + value: value, + done: done + }; +} + +function readAndResolve(iter) { + var resolve = iter[kLastResolve]; + + if (resolve !== null) { + var data = iter[kStream].read(); // we defer if data is null + // we can be expecting either 'end' or + // 'error' + + if (data !== null) { + iter[kLastPromise] = null; + iter[kLastResolve] = null; + iter[kLastReject] 
= null; + resolve(createIterResult(data, false)); + } + } +} + +function onReadable(iter) { + // we wait for the next tick, because it might + // emit an error with process.nextTick + process.nextTick(readAndResolve, iter); +} + +function wrapForNext(lastPromise, iter) { + return function (resolve, reject) { + lastPromise.then(function () { + if (iter[kEnded]) { + resolve(createIterResult(undefined, true)); + return; + } + + iter[kHandlePromise](resolve, reject); + }, reject); + }; +} + +var AsyncIteratorPrototype = Object.getPrototypeOf(function () {}); +var ReadableStreamAsyncIteratorPrototype = Object.setPrototypeOf((_Object$setPrototypeO = { + get stream() { + return this[kStream]; + }, + + next: function next() { + var _this = this; + + // if we have detected an error in the meanwhile + // reject straight away + var error = this[kError]; + + if (error !== null) { + return Promise.reject(error); + } + + if (this[kEnded]) { + return Promise.resolve(createIterResult(undefined, true)); + } + + if (this[kStream].destroyed) { + // We need to defer via nextTick because if .destroy(err) is + // called, the error will be emitted via nextTick, and + // we cannot guarantee that there is no error lingering around + // waiting to be emitted. 
+ return new Promise(function (resolve, reject) { + process.nextTick(function () { + if (_this[kError]) { + reject(_this[kError]); + } else { + resolve(createIterResult(undefined, true)); + } + }); + }); + } // if we have multiple next() calls + // we will wait for the previous Promise to finish + // this logic is optimized to support for await loops, + // where next() is only called once at a time + + + var lastPromise = this[kLastPromise]; + var promise; + + if (lastPromise) { + promise = new Promise(wrapForNext(lastPromise, this)); + } else { + // fast path needed to support multiple this.push() + // without triggering the next() queue + var data = this[kStream].read(); + + if (data !== null) { + return Promise.resolve(createIterResult(data, false)); + } + + promise = new Promise(this[kHandlePromise]); + } + + this[kLastPromise] = promise; + return promise; + } +}, _defineProperty(_Object$setPrototypeO, Symbol.asyncIterator, function () { + return this; +}), _defineProperty(_Object$setPrototypeO, "return", function _return() { + var _this2 = this; + + // destroy(err, cb) is a private API + // we can guarantee we have that here, because we control the + // Readable class this is attached to + return new Promise(function (resolve, reject) { + _this2[kStream].destroy(null, function (err) { + if (err) { + reject(err); + return; + } + + resolve(createIterResult(undefined, true)); + }); + }); +}), _Object$setPrototypeO), AsyncIteratorPrototype); + +var createReadableStreamAsyncIterator = function createReadableStreamAsyncIterator(stream) { + var _Object$create; + + var iterator = Object.create(ReadableStreamAsyncIteratorPrototype, (_Object$create = {}, _defineProperty(_Object$create, kStream, { + value: stream, + writable: true + }), _defineProperty(_Object$create, kLastResolve, { + value: null, + writable: true + }), _defineProperty(_Object$create, kLastReject, { + value: null, + writable: true + }), _defineProperty(_Object$create, kError, { + value: null, + 
writable: true + }), _defineProperty(_Object$create, kEnded, { + value: stream._readableState.endEmitted, + writable: true + }), _defineProperty(_Object$create, kHandlePromise, { + value: function value(resolve, reject) { + var data = iterator[kStream].read(); + + if (data) { + iterator[kLastPromise] = null; + iterator[kLastResolve] = null; + iterator[kLastReject] = null; + resolve(createIterResult(data, false)); + } else { + iterator[kLastResolve] = resolve; + iterator[kLastReject] = reject; + } + }, + writable: true + }), _Object$create)); + iterator[kLastPromise] = null; + finished(stream, function (err) { + if (err && err.code !== 'ERR_STREAM_PREMATURE_CLOSE') { + var reject = iterator[kLastReject]; // reject if we are waiting for data in the Promise + // returned by next() and store the error + + if (reject !== null) { + iterator[kLastPromise] = null; + iterator[kLastResolve] = null; + iterator[kLastReject] = null; + reject(err); + } + + iterator[kError] = err; + return; + } + + var resolve = iterator[kLastResolve]; + + if (resolve !== null) { + iterator[kLastPromise] = null; + iterator[kLastResolve] = null; + iterator[kLastReject] = null; + resolve(createIterResult(undefined, true)); + } + + iterator[kEnded] = true; + }); + stream.on('readable', onReadable.bind(null, iterator)); + return iterator; +}; + +module.exports = createReadableStreamAsyncIterator; + +/***/ }), + +/***/ 30168: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; + + +function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); if (enumerableOnly) symbols = symbols.filter(function (sym) { return Object.getOwnPropertyDescriptor(object, sym).enumerable; }); keys.push.apply(keys, symbols); } return keys; } + +function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i] != null ? 
arguments[i] : {}; if (i % 2) { ownKeys(Object(source), true).forEach(function (key) { _defineProperty(target, key, source[key]); }); } else if (Object.getOwnPropertyDescriptors) { Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)); } else { ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } } return target; } + +function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; } + +function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } + +function _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } + +function _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; } + +var _require = __nccwpck_require__(14300), + Buffer = _require.Buffer; + +var _require2 = __nccwpck_require__(73837), + inspect = _require2.inspect; + +var custom = inspect && inspect.custom || 'inspect'; + +function copyBuffer(src, target, offset) { + Buffer.prototype.copy.call(src, target, offset); +} + +module.exports = +/*#__PURE__*/ +function () { + function BufferList() { + _classCallCheck(this, BufferList); + + this.head = null; + this.tail = null; + this.length = 0; + } + + _createClass(BufferList, [{ + key: "push", + value: function push(v) { + var entry = { + data: v, + next: null + }; + if (this.length > 0) this.tail.next = entry;else this.head = 
entry; + this.tail = entry; + ++this.length; + } + }, { + key: "unshift", + value: function unshift(v) { + var entry = { + data: v, + next: this.head + }; + if (this.length === 0) this.tail = entry; + this.head = entry; + ++this.length; + } + }, { + key: "shift", + value: function shift() { + if (this.length === 0) return; + var ret = this.head.data; + if (this.length === 1) this.head = this.tail = null;else this.head = this.head.next; + --this.length; + return ret; + } + }, { + key: "clear", + value: function clear() { + this.head = this.tail = null; + this.length = 0; + } + }, { + key: "join", + value: function join(s) { + if (this.length === 0) return ''; + var p = this.head; + var ret = '' + p.data; + + while (p = p.next) { + ret += s + p.data; + } + + return ret; + } + }, { + key: "concat", + value: function concat(n) { + if (this.length === 0) return Buffer.alloc(0); + var ret = Buffer.allocUnsafe(n >>> 0); + var p = this.head; + var i = 0; + + while (p) { + copyBuffer(p.data, ret, i); + i += p.data.length; + p = p.next; + } + + return ret; + } // Consumes a specified amount of bytes or characters from the buffered data. + + }, { + key: "consume", + value: function consume(n, hasStrings) { + var ret; + + if (n < this.head.data.length) { + // `slice` is the same for buffers and strings. + ret = this.head.data.slice(0, n); + this.head.data = this.head.data.slice(n); + } else if (n === this.head.data.length) { + // First chunk is a perfect match. + ret = this.shift(); + } else { + // Result spans more than one buffer. + ret = hasStrings ? this._getString(n) : this._getBuffer(n); + } + + return ret; + } + }, { + key: "first", + value: function first() { + return this.head.data; + } // Consumes a specified amount of characters from the buffered data. + + }, { + key: "_getString", + value: function _getString(n) { + var p = this.head; + var c = 1; + var ret = p.data; + n -= ret.length; + + while (p = p.next) { + var str = p.data; + var nb = n > str.length ? 
str.length : n; + if (nb === str.length) ret += str;else ret += str.slice(0, n); + n -= nb; + + if (n === 0) { + if (nb === str.length) { + ++c; + if (p.next) this.head = p.next;else this.head = this.tail = null; + } else { + this.head = p; + p.data = str.slice(nb); + } + + break; + } + + ++c; + } + + this.length -= c; + return ret; + } // Consumes a specified amount of bytes from the buffered data. + + }, { + key: "_getBuffer", + value: function _getBuffer(n) { + var ret = Buffer.allocUnsafe(n); + var p = this.head; + var c = 1; + p.data.copy(ret); + n -= p.data.length; + + while (p = p.next) { + var buf = p.data; + var nb = n > buf.length ? buf.length : n; + buf.copy(ret, ret.length - n, 0, nb); + n -= nb; + + if (n === 0) { + if (nb === buf.length) { + ++c; + if (p.next) this.head = p.next;else this.head = this.tail = null; + } else { + this.head = p; + p.data = buf.slice(nb); + } + + break; + } + + ++c; + } + + this.length -= c; + return ret; + } // Make sure the linked list only shows the minimal necessary information. + + }, { + key: custom, + value: function value(_, options) { + return inspect(this, _objectSpread({}, options, { + // Only inspect one level. + depth: 0, + // It should not recurse. 
+ customInspect: false + })); + } + }]); + + return BufferList; +}(); + +/***/ }), + +/***/ 33844: +/***/ ((module) => { + +"use strict"; + // undocumented cb() API, needed for core, not for public API + +function destroy(err, cb) { + var _this = this; + + var readableDestroyed = this._readableState && this._readableState.destroyed; + var writableDestroyed = this._writableState && this._writableState.destroyed; + + if (readableDestroyed || writableDestroyed) { + if (cb) { + cb(err); + } else if (err) { + if (!this._writableState) { + process.nextTick(emitErrorNT, this, err); + } else if (!this._writableState.errorEmitted) { + this._writableState.errorEmitted = true; + process.nextTick(emitErrorNT, this, err); + } + } + + return this; + } // we set destroyed to true before firing error callbacks in order + // to make it re-entrance safe in case destroy() is called within callbacks + + + if (this._readableState) { + this._readableState.destroyed = true; + } // if this is a duplex stream mark the writable part as destroyed as well + + + if (this._writableState) { + this._writableState.destroyed = true; + } + + this._destroy(err || null, function (err) { + if (!cb && err) { + if (!_this._writableState) { + process.nextTick(emitErrorAndCloseNT, _this, err); + } else if (!_this._writableState.errorEmitted) { + _this._writableState.errorEmitted = true; + process.nextTick(emitErrorAndCloseNT, _this, err); + } else { + process.nextTick(emitCloseNT, _this); + } + } else if (cb) { + process.nextTick(emitCloseNT, _this); + cb(err); + } else { + process.nextTick(emitCloseNT, _this); + } + }); + + return this; +} + +function emitErrorAndCloseNT(self, err) { + emitErrorNT(self, err); + emitCloseNT(self); +} + +function emitCloseNT(self) { + if (self._writableState && !self._writableState.emitClose) return; + if (self._readableState && !self._readableState.emitClose) return; + self.emit('close'); +} + +function undestroy() { + if (this._readableState) { + 
this._readableState.destroyed = false; + this._readableState.reading = false; + this._readableState.ended = false; + this._readableState.endEmitted = false; + } + + if (this._writableState) { + this._writableState.destroyed = false; + this._writableState.ended = false; + this._writableState.ending = false; + this._writableState.finalCalled = false; + this._writableState.prefinished = false; + this._writableState.finished = false; + this._writableState.errorEmitted = false; + } +} + +function emitErrorNT(self, err) { + self.emit('error', err); +} + +function errorOrDestroy(stream, err) { + // We have tests that rely on errors being emitted + // in the same tick, so changing this is semver major. + // For now when you opt-in to autoDestroy we allow + // the error to be emitted nextTick. In a future + // semver major update we should change the default to this. + var rState = stream._readableState; + var wState = stream._writableState; + if (rState && rState.autoDestroy || wState && wState.autoDestroy) stream.destroy(err);else stream.emit('error', err); +} + +module.exports = { + destroy: destroy, + undestroy: undestroy, + errorOrDestroy: errorOrDestroy +}; + +/***/ }), + +/***/ 60193: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; +// Ported from https://github.com/mafintosh/end-of-stream with +// permission from the author, Mathias Buus (@mafintosh). 
+ + +var ERR_STREAM_PREMATURE_CLOSE = (__nccwpck_require__(7190)/* .codes.ERR_STREAM_PREMATURE_CLOSE */ .q.ERR_STREAM_PREMATURE_CLOSE); + +function once(callback) { + var called = false; + return function () { + if (called) return; + called = true; + + for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { + args[_key] = arguments[_key]; + } + + callback.apply(this, args); + }; +} + +function noop() {} + +function isRequest(stream) { + return stream.setHeader && typeof stream.abort === 'function'; +} + +function eos(stream, opts, callback) { + if (typeof opts === 'function') return eos(stream, null, opts); + if (!opts) opts = {}; + callback = once(callback || noop); + var readable = opts.readable || opts.readable !== false && stream.readable; + var writable = opts.writable || opts.writable !== false && stream.writable; + + var onlegacyfinish = function onlegacyfinish() { + if (!stream.writable) onfinish(); + }; + + var writableEnded = stream._writableState && stream._writableState.finished; + + var onfinish = function onfinish() { + writable = false; + writableEnded = true; + if (!readable) callback.call(stream); + }; + + var readableEnded = stream._readableState && stream._readableState.endEmitted; + + var onend = function onend() { + readable = false; + readableEnded = true; + if (!writable) callback.call(stream); + }; + + var onerror = function onerror(err) { + callback.call(stream, err); + }; + + var onclose = function onclose() { + var err; + + if (readable && !readableEnded) { + if (!stream._readableState || !stream._readableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE(); + return callback.call(stream, err); + } + + if (writable && !writableEnded) { + if (!stream._writableState || !stream._writableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE(); + return callback.call(stream, err); + } + }; + + var onrequest = function onrequest() { + stream.req.on('finish', onfinish); + }; + + if (isRequest(stream)) { + 
stream.on('complete', onfinish); + stream.on('abort', onclose); + if (stream.req) onrequest();else stream.on('request', onrequest); + } else if (writable && !stream._writableState) { + // legacy streams + stream.on('end', onlegacyfinish); + stream.on('close', onlegacyfinish); + } + + stream.on('end', onend); + stream.on('finish', onfinish); + if (opts.error !== false) stream.on('error', onerror); + stream.on('close', onclose); + return function () { + stream.removeListener('complete', onfinish); + stream.removeListener('abort', onclose); + stream.removeListener('request', onrequest); + if (stream.req) stream.req.removeListener('finish', onfinish); + stream.removeListener('end', onlegacyfinish); + stream.removeListener('close', onlegacyfinish); + stream.removeListener('finish', onfinish); + stream.removeListener('end', onend); + stream.removeListener('error', onerror); + stream.removeListener('close', onclose); + }; +} + +module.exports = eos; + +/***/ }), + +/***/ 4996: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; + + +function asyncGeneratorStep(gen, resolve, reject, _next, _throw, key, arg) { try { var info = gen[key](arg); var value = info.value; } catch (error) { reject(error); return; } if (info.done) { resolve(value); } else { Promise.resolve(value).then(_next, _throw); } } + +function _asyncToGenerator(fn) { return function () { var self = this, args = arguments; return new Promise(function (resolve, reject) { var gen = fn.apply(self, args); function _next(value) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "next", value); } function _throw(err) { asyncGeneratorStep(gen, resolve, reject, _next, _throw, "throw", err); } _next(undefined); }); }; } + +function ownKeys(object, enumerableOnly) { var keys = Object.keys(object); if (Object.getOwnPropertySymbols) { var symbols = Object.getOwnPropertySymbols(object); if (enumerableOnly) symbols = symbols.filter(function (sym) { return 
Object.getOwnPropertyDescriptor(object, sym).enumerable; }); keys.push.apply(keys, symbols); } return keys; } + +function _objectSpread(target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i] != null ? arguments[i] : {}; if (i % 2) { ownKeys(Object(source), true).forEach(function (key) { _defineProperty(target, key, source[key]); }); } else if (Object.getOwnPropertyDescriptors) { Object.defineProperties(target, Object.getOwnPropertyDescriptors(source)); } else { ownKeys(Object(source)).forEach(function (key) { Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key)); }); } } return target; } + +function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; } + +var ERR_INVALID_ARG_TYPE = (__nccwpck_require__(7190)/* .codes.ERR_INVALID_ARG_TYPE */ .q.ERR_INVALID_ARG_TYPE); + +function from(Readable, iterable, opts) { + var iterator; + + if (iterable && typeof iterable.next === 'function') { + iterator = iterable; + } else if (iterable && iterable[Symbol.asyncIterator]) iterator = iterable[Symbol.asyncIterator]();else if (iterable && iterable[Symbol.iterator]) iterator = iterable[Symbol.iterator]();else throw new ERR_INVALID_ARG_TYPE('iterable', ['Iterable'], iterable); + + var readable = new Readable(_objectSpread({ + objectMode: true + }, opts)); // Reading boolean to protect against _read + // being called before last iteration completion. 
+ + var reading = false; + + readable._read = function () { + if (!reading) { + reading = true; + next(); + } + }; + + function next() { + return _next2.apply(this, arguments); + } + + function _next2() { + _next2 = _asyncToGenerator(function* () { + try { + var _ref = yield iterator.next(), + value = _ref.value, + done = _ref.done; + + if (done) { + readable.push(null); + } else if (readable.push((yield value))) { + next(); + } else { + reading = false; + } + } catch (err) { + readable.destroy(err); + } + }); + return _next2.apply(this, arguments); + } + + return readable; +} + +module.exports = from; + +/***/ }), + +/***/ 44138: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; +// Ported from https://github.com/mafintosh/pump with +// permission from the author, Mathias Buus (@mafintosh). + + +var eos; + +function once(callback) { + var called = false; + return function () { + if (called) return; + called = true; + callback.apply(void 0, arguments); + }; +} + +var _require$codes = (__nccwpck_require__(7190)/* .codes */ .q), + ERR_MISSING_ARGS = _require$codes.ERR_MISSING_ARGS, + ERR_STREAM_DESTROYED = _require$codes.ERR_STREAM_DESTROYED; + +function noop(err) { + // Rethrow the error if it exists to avoid swallowing it + if (err) throw err; +} + +function isRequest(stream) { + return stream.setHeader && typeof stream.abort === 'function'; +} + +function destroyer(stream, reading, writing, callback) { + callback = once(callback); + var closed = false; + stream.on('close', function () { + closed = true; + }); + if (eos === undefined) eos = __nccwpck_require__(60193); + eos(stream, { + readable: reading, + writable: writing + }, function (err) { + if (err) return callback(err); + closed = true; + callback(); + }); + var destroyed = false; + return function (err) { + if (closed) return; + if (destroyed) return; + destroyed = true; // request.destroy just do .end - .abort is what we want + + if (isRequest(stream)) return 
stream.abort(); + if (typeof stream.destroy === 'function') return stream.destroy(); + callback(err || new ERR_STREAM_DESTROYED('pipe')); + }; +} + +function call(fn) { + fn(); +} + +function pipe(from, to) { + return from.pipe(to); +} + +function popCallback(streams) { + if (!streams.length) return noop; + if (typeof streams[streams.length - 1] !== 'function') return noop; + return streams.pop(); +} + +function pipeline() { + for (var _len = arguments.length, streams = new Array(_len), _key = 0; _key < _len; _key++) { + streams[_key] = arguments[_key]; + } + + var callback = popCallback(streams); + if (Array.isArray(streams[0])) streams = streams[0]; + + if (streams.length < 2) { + throw new ERR_MISSING_ARGS('streams'); + } + + var error; + var destroys = streams.map(function (stream, i) { + var reading = i < streams.length - 1; + var writing = i > 0; + return destroyer(stream, reading, writing, function (err) { + if (!error) error = err; + if (err) destroys.forEach(call); + if (reading) return; + destroys.forEach(call); + callback(error); + }); + }); + return streams.reduce(pipe); +} + +module.exports = pipeline; + +/***/ }), + +/***/ 95408: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; + + +var ERR_INVALID_OPT_VALUE = (__nccwpck_require__(7190)/* .codes.ERR_INVALID_OPT_VALUE */ .q.ERR_INVALID_OPT_VALUE); + +function highWaterMarkFrom(options, isDuplex, duplexKey) { + return options.highWaterMark != null ? options.highWaterMark : isDuplex ? options[duplexKey] : null; +} + +function getHighWaterMark(state, options, duplexKey, isDuplex) { + var hwm = highWaterMarkFrom(options, isDuplex, duplexKey); + + if (hwm != null) { + if (!(isFinite(hwm) && Math.floor(hwm) === hwm) || hwm < 0) { + var name = isDuplex ? duplexKey : 'highWaterMark'; + throw new ERR_INVALID_OPT_VALUE(name, hwm); + } + + return Math.floor(hwm); + } // Default value + + + return state.objectMode ? 
16 : 16 * 1024; +} + +module.exports = { + getHighWaterMark: getHighWaterMark +}; + +/***/ }), + +/***/ 49995: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +module.exports = __nccwpck_require__(12781); + + +/***/ }), + +/***/ 67031: +/***/ ((module, exports, __nccwpck_require__) => { + +var Stream = __nccwpck_require__(12781); +if (process.env.READABLE_STREAM === 'disable' && Stream) { + module.exports = Stream.Readable; + Object.assign(module.exports, Stream); + module.exports.Stream = Stream; +} else { + exports = module.exports = __nccwpck_require__(41132); + exports.Stream = Stream || exports; + exports.Readable = exports; + exports.Writable = __nccwpck_require__(56573); + exports.Duplex = __nccwpck_require__(91107); + exports.Transform = __nccwpck_require__(38469); + exports.PassThrough = __nccwpck_require__(30933); + exports.finished = __nccwpck_require__(60193); + exports.pipeline = __nccwpck_require__(44138); +} + + +/***/ }), + +/***/ 36843: +/***/ ((module) => { + +// Copyright 2011 Mark Cavage All rights reserved. + + +module.exports = { + + newInvalidAsn1Error: function (msg) { + var e = new Error(); + e.name = 'InvalidAsn1Error'; + e.message = msg || ''; + return e; + } + +}; + + +/***/ }), + +/***/ 77136: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// Copyright 2011 Mark Cavage All rights reserved. + +var errors = __nccwpck_require__(36843); +var types = __nccwpck_require__(50695); + +var Reader = __nccwpck_require__(697); +var Writer = __nccwpck_require__(35436); + + +// --- Exports + +module.exports = { + + Reader: Reader, + + Writer: Writer + +}; + +for (var t in types) { + if (types.hasOwnProperty(t)) + module.exports[t] = types[t]; +} +for (var e in errors) { + if (errors.hasOwnProperty(e)) + module.exports[e] = errors[e]; +} + + +/***/ }), + +/***/ 697: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// Copyright 2011 Mark Cavage All rights reserved. 
+ +var assert = __nccwpck_require__(39491); +var Buffer = (__nccwpck_require__(83574).Buffer); + +var ASN1 = __nccwpck_require__(50695); +var errors = __nccwpck_require__(36843); + + +// --- Globals + +var newInvalidAsn1Error = errors.newInvalidAsn1Error; + + + +// --- API + +function Reader(data) { + if (!data || !Buffer.isBuffer(data)) + throw new TypeError('data must be a node Buffer'); + + this._buf = data; + this._size = data.length; + + // These hold the "current" state + this._len = 0; + this._offset = 0; +} + +Object.defineProperty(Reader.prototype, 'length', { + enumerable: true, + get: function () { return (this._len); } +}); + +Object.defineProperty(Reader.prototype, 'offset', { + enumerable: true, + get: function () { return (this._offset); } +}); + +Object.defineProperty(Reader.prototype, 'remain', { + get: function () { return (this._size - this._offset); } +}); + +Object.defineProperty(Reader.prototype, 'buffer', { + get: function () { return (this._buf.slice(this._offset)); } +}); + + +/** + * Reads a single byte and advances offset; you can pass in `true` to make this + * a "peek" operation (i.e., get the byte, but don't advance the offset). + * + * @param {Boolean} peek true means don't move offset. + * @return {Number} the next byte, null if not enough data. + */ +Reader.prototype.readByte = function (peek) { + if (this._size - this._offset < 1) + return null; + + var b = this._buf[this._offset] & 0xff; + + if (!peek) + this._offset += 1; + + return b; +}; + + +Reader.prototype.peek = function () { + return this.readByte(true); +}; + + +/** + * Reads a (potentially) variable length off the BER buffer. This call is + * not really meant to be called directly, as callers have to manipulate + * the internal buffer afterwards. + * + * As a result of this call, you can call `Reader.length`, until the + * next thing called that does a readLength. + * + * @return {Number} the amount of offset to advance the buffer. 
+ * @throws {InvalidAsn1Error} on bad ASN.1 + */ +Reader.prototype.readLength = function (offset) { + if (offset === undefined) + offset = this._offset; + + if (offset >= this._size) + return null; + + var lenB = this._buf[offset++] & 0xff; + if (lenB === null) + return null; + + if ((lenB & 0x80) === 0x80) { + lenB &= 0x7f; + + if (lenB === 0) + throw newInvalidAsn1Error('Indefinite length not supported'); + + if (lenB > 4) + throw newInvalidAsn1Error('encoding too long'); + + if (this._size - offset < lenB) + return null; + + this._len = 0; + for (var i = 0; i < lenB; i++) + this._len = (this._len << 8) + (this._buf[offset++] & 0xff); + + } else { + // Wasn't a variable length + this._len = lenB; + } + + return offset; +}; + + +/** + * Parses the next sequence in this BER buffer. + * + * To get the length of the sequence, call `Reader.length`. + * + * @return {Number} the sequence's tag. + */ +Reader.prototype.readSequence = function (tag) { + var seq = this.peek(); + if (seq === null) + return null; + if (tag !== undefined && tag !== seq) + throw newInvalidAsn1Error('Expected 0x' + tag.toString(16) + + ': got 0x' + seq.toString(16)); + + var o = this.readLength(this._offset + 1); // stored in `length` + if (o === null) + return null; + + this._offset = o; + return seq; +}; + + +Reader.prototype.readInt = function () { + return this._readTag(ASN1.Integer); +}; + + +Reader.prototype.readBoolean = function () { + return (this._readTag(ASN1.Boolean) === 0 ? 
false : true); +}; + + +Reader.prototype.readEnumeration = function () { + return this._readTag(ASN1.Enumeration); +}; + + +Reader.prototype.readString = function (tag, retbuf) { + if (!tag) + tag = ASN1.OctetString; + + var b = this.peek(); + if (b === null) + return null; + + if (b !== tag) + throw newInvalidAsn1Error('Expected 0x' + tag.toString(16) + + ': got 0x' + b.toString(16)); + + var o = this.readLength(this._offset + 1); // stored in `length` + + if (o === null) + return null; + + if (this.length > this._size - o) + return null; + + this._offset = o; + + if (this.length === 0) + return retbuf ? Buffer.alloc(0) : ''; + + var str = this._buf.slice(this._offset, this._offset + this.length); + this._offset += this.length; + + return retbuf ? str : str.toString('utf8'); +}; + +Reader.prototype.readOID = function (tag) { + if (!tag) + tag = ASN1.OID; + + var b = this.readString(tag, true); + if (b === null) + return null; + + var values = []; + var value = 0; + + for (var i = 0; i < b.length; i++) { + var byte = b[i] & 0xff; + + value <<= 7; + value += byte & 0x7f; + if ((byte & 0x80) === 0) { + values.push(value); + value = 0; + } + } + + value = values.shift(); + values.unshift(value % 40); + values.unshift((value / 40) >> 0); + + return values.join('.'); +}; + + +Reader.prototype._readTag = function (tag) { + assert.ok(tag !== undefined); + + var b = this.peek(); + + if (b === null) + return null; + + if (b !== tag) + throw newInvalidAsn1Error('Expected 0x' + tag.toString(16) + + ': got 0x' + b.toString(16)); + + var o = this.readLength(this._offset + 1); // stored in `length` + if (o === null) + return null; + + if (this.length > 4) + throw newInvalidAsn1Error('Integer too long: ' + this.length); + + if (this.length > this._size - o) + return null; + this._offset = o; + + var fb = this._buf[this._offset]; + var value = 0; + + for (var i = 0; i < this.length; i++) { + value <<= 8; + value |= (this._buf[this._offset++] & 0xff); + } + + if ((fb & 0x80) === 
0x80 && i !== 4) + value -= (1 << (i * 8)); + + return value >> 0; +}; + + + +// --- Exported API + +module.exports = Reader; + + +/***/ }), + +/***/ 50695: +/***/ ((module) => { + +// Copyright 2011 Mark Cavage All rights reserved. + + +module.exports = { + EOC: 0, + Boolean: 1, + Integer: 2, + BitString: 3, + OctetString: 4, + Null: 5, + OID: 6, + ObjectDescriptor: 7, + External: 8, + Real: 9, // float + Enumeration: 10, + PDV: 11, + Utf8String: 12, + RelativeOID: 13, + Sequence: 16, + Set: 17, + NumericString: 18, + PrintableString: 19, + T61String: 20, + VideotexString: 21, + IA5String: 22, + UTCTime: 23, + GeneralizedTime: 24, + GraphicString: 25, + VisibleString: 26, + GeneralString: 28, + UniversalString: 29, + CharacterString: 30, + BMPString: 31, + Constructor: 32, + Context: 128 +}; + + +/***/ }), + +/***/ 35436: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// Copyright 2011 Mark Cavage All rights reserved. + +var assert = __nccwpck_require__(39491); +var Buffer = (__nccwpck_require__(83574).Buffer); +var ASN1 = __nccwpck_require__(50695); +var errors = __nccwpck_require__(36843); + + +// --- Globals + +var newInvalidAsn1Error = errors.newInvalidAsn1Error; + +var DEFAULT_OPTS = { + size: 1024, + growthFactor: 8 +}; + + +// --- Helpers + +function merge(from, to) { + assert.ok(from); + assert.equal(typeof (from), 'object'); + assert.ok(to); + assert.equal(typeof (to), 'object'); + + var keys = Object.getOwnPropertyNames(from); + keys.forEach(function (key) { + if (to[key]) + return; + + var value = Object.getOwnPropertyDescriptor(from, key); + Object.defineProperty(to, key, value); + }); + + return to; +} + + + +// --- API + +function Writer(options) { + options = merge(DEFAULT_OPTS, options || {}); + + this._buf = Buffer.alloc(options.size || 1024); + this._size = this._buf.length; + this._offset = 0; + this._options = options; + + // A list of offsets in the buffer where we need to insert + // sequence tag/len pairs. 
+ this._seq = []; +} + +Object.defineProperty(Writer.prototype, 'buffer', { + get: function () { + if (this._seq.length) + throw newInvalidAsn1Error(this._seq.length + ' unended sequence(s)'); + + return (this._buf.slice(0, this._offset)); + } +}); + +Writer.prototype.writeByte = function (b) { + if (typeof (b) !== 'number') + throw new TypeError('argument must be a Number'); + + this._ensure(1); + this._buf[this._offset++] = b; +}; + + +Writer.prototype.writeInt = function (i, tag) { + if (typeof (i) !== 'number') + throw new TypeError('argument must be a Number'); + if (typeof (tag) !== 'number') + tag = ASN1.Integer; + + var sz = 4; + + while ((((i & 0xff800000) === 0) || ((i & 0xff800000) === 0xff800000 >> 0)) && + (sz > 1)) { + sz--; + i <<= 8; + } + + if (sz > 4) + throw newInvalidAsn1Error('BER ints cannot be > 0xffffffff'); + + this._ensure(2 + sz); + this._buf[this._offset++] = tag; + this._buf[this._offset++] = sz; + + while (sz-- > 0) { + this._buf[this._offset++] = ((i & 0xff000000) >>> 24); + i <<= 8; + } + +}; + + +Writer.prototype.writeNull = function () { + this.writeByte(ASN1.Null); + this.writeByte(0x00); +}; + + +Writer.prototype.writeEnumeration = function (i, tag) { + if (typeof (i) !== 'number') + throw new TypeError('argument must be a Number'); + if (typeof (tag) !== 'number') + tag = ASN1.Enumeration; + + return this.writeInt(i, tag); +}; + + +Writer.prototype.writeBoolean = function (b, tag) { + if (typeof (b) !== 'boolean') + throw new TypeError('argument must be a Boolean'); + if (typeof (tag) !== 'number') + tag = ASN1.Boolean; + + this._ensure(3); + this._buf[this._offset++] = tag; + this._buf[this._offset++] = 0x01; + this._buf[this._offset++] = b ? 
0xff : 0x00; +}; + + +Writer.prototype.writeString = function (s, tag) { + if (typeof (s) !== 'string') + throw new TypeError('argument must be a string (was: ' + typeof (s) + ')'); + if (typeof (tag) !== 'number') + tag = ASN1.OctetString; + + var len = Buffer.byteLength(s); + this.writeByte(tag); + this.writeLength(len); + if (len) { + this._ensure(len); + this._buf.write(s, this._offset); + this._offset += len; + } +}; + + +Writer.prototype.writeBuffer = function (buf, tag) { + if (typeof (tag) !== 'number') + throw new TypeError('tag must be a number'); + if (!Buffer.isBuffer(buf)) + throw new TypeError('argument must be a buffer'); + + this.writeByte(tag); + this.writeLength(buf.length); + this._ensure(buf.length); + buf.copy(this._buf, this._offset, 0, buf.length); + this._offset += buf.length; +}; + + +Writer.prototype.writeStringArray = function (strings) { + if ((!strings instanceof Array)) + throw new TypeError('argument must be an Array[String]'); + + var self = this; + strings.forEach(function (s) { + self.writeString(s); + }); +}; + +// This is really to solve DER cases, but whatever for now +Writer.prototype.writeOID = function (s, tag) { + if (typeof (s) !== 'string') + throw new TypeError('argument must be a string'); + if (typeof (tag) !== 'number') + tag = ASN1.OID; + + if (!/^([0-9]+\.){3,}[0-9]+$/.test(s)) + throw new Error('argument is not a valid OID string'); + + function encodeOctet(bytes, octet) { + if (octet < 128) { + bytes.push(octet); + } else if (octet < 16384) { + bytes.push((octet >>> 7) | 0x80); + bytes.push(octet & 0x7F); + } else if (octet < 2097152) { + bytes.push((octet >>> 14) | 0x80); + bytes.push(((octet >>> 7) | 0x80) & 0xFF); + bytes.push(octet & 0x7F); + } else if (octet < 268435456) { + bytes.push((octet >>> 21) | 0x80); + bytes.push(((octet >>> 14) | 0x80) & 0xFF); + bytes.push(((octet >>> 7) | 0x80) & 0xFF); + bytes.push(octet & 0x7F); + } else { + bytes.push(((octet >>> 28) | 0x80) & 0xFF); + bytes.push(((octet >>> 21) 
| 0x80) & 0xFF); + bytes.push(((octet >>> 14) | 0x80) & 0xFF); + bytes.push(((octet >>> 7) | 0x80) & 0xFF); + bytes.push(octet & 0x7F); + } + } + + var tmp = s.split('.'); + var bytes = []; + bytes.push(parseInt(tmp[0], 10) * 40 + parseInt(tmp[1], 10)); + tmp.slice(2).forEach(function (b) { + encodeOctet(bytes, parseInt(b, 10)); + }); + + var self = this; + this._ensure(2 + bytes.length); + this.writeByte(tag); + this.writeLength(bytes.length); + bytes.forEach(function (b) { + self.writeByte(b); + }); +}; + + +Writer.prototype.writeLength = function (len) { + if (typeof (len) !== 'number') + throw new TypeError('argument must be a Number'); + + this._ensure(4); + + if (len <= 0x7f) { + this._buf[this._offset++] = len; + } else if (len <= 0xff) { + this._buf[this._offset++] = 0x81; + this._buf[this._offset++] = len; + } else if (len <= 0xffff) { + this._buf[this._offset++] = 0x82; + this._buf[this._offset++] = len >> 8; + this._buf[this._offset++] = len; + } else if (len <= 0xffffff) { + this._buf[this._offset++] = 0x83; + this._buf[this._offset++] = len >> 16; + this._buf[this._offset++] = len >> 8; + this._buf[this._offset++] = len; + } else { + throw newInvalidAsn1Error('Length too long (> 4 bytes)'); + } +}; + +Writer.prototype.startSequence = function (tag) { + if (typeof (tag) !== 'number') + tag = ASN1.Sequence | ASN1.Constructor; + + this.writeByte(tag); + this._seq.push(this._offset); + this._ensure(3); + this._offset += 3; +}; + + +Writer.prototype.endSequence = function () { + var seq = this._seq.pop(); + var start = seq + 3; + var len = this._offset - start; + + if (len <= 0x7f) { + this._shift(start, len, -2); + this._buf[seq] = len; + } else if (len <= 0xff) { + this._shift(start, len, -1); + this._buf[seq] = 0x81; + this._buf[seq + 1] = len; + } else if (len <= 0xffff) { + this._buf[seq] = 0x82; + this._buf[seq + 1] = len >> 8; + this._buf[seq + 2] = len; + } else if (len <= 0xffffff) { + this._shift(start, len, 1); + this._buf[seq] = 0x83; + 
this._buf[seq + 1] = len >> 16; + this._buf[seq + 2] = len >> 8; + this._buf[seq + 3] = len; + } else { + throw newInvalidAsn1Error('Sequence too long'); + } +}; + + +Writer.prototype._shift = function (start, len, shift) { + assert.ok(start !== undefined); + assert.ok(len !== undefined); + assert.ok(shift); + + this._buf.copy(this._buf, start + shift, start, start + len); + this._offset += shift; +}; + +Writer.prototype._ensure = function (len) { + assert.ok(len); + + if (this._size - this._offset < len) { + var sz = this._size * this._options.growthFactor; + if (sz - this._offset < len) + sz += len; + + var buf = Buffer.alloc(sz); + + this._buf.copy(buf, 0, 0, this._offset); + this._buf = buf; + this._size = sz; + } +}; + + + +// --- Exported API + +module.exports = Writer; + + +/***/ }), + +/***/ 58889: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// Copyright 2011 Mark Cavage All rights reserved. + +// If you have no idea what ASN.1 or BER is, see this: +// ftp://ftp.rsa.com/pub/pkcs/ascii/layman.asc + +var Ber = __nccwpck_require__(77136); + + + +// --- Exported API + +module.exports = { + + Ber: Ber, + + BerReader: Ber.Reader, + + BerWriter: Ber.Writer + +}; + + +/***/ }), + +/***/ 25072: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// Copyright (c) 2012, Mark Cavage. All rights reserved. +// Copyright 2015 Joyent, Inc. + +var assert = __nccwpck_require__(39491); +var Stream = (__nccwpck_require__(12781).Stream); +var util = __nccwpck_require__(73837); + + +///--- Globals + +/* JSSTYLED */ +var UUID_REGEXP = /^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$/; + + +///--- Internal + +function _capitalize(str) { + return (str.charAt(0).toUpperCase() + str.slice(1)); +} + +function _toss(name, expected, oper, arg, actual) { + throw new assert.AssertionError({ + message: util.format('%s (%s) is required', name, expected), + actual: (actual === undefined) ? 
typeof (arg) : actual(arg), + expected: expected, + operator: oper || '===', + stackStartFunction: _toss.caller + }); +} + +function _getClass(arg) { + return (Object.prototype.toString.call(arg).slice(8, -1)); +} + +function noop() { + // Why even bother with asserts? +} + + +///--- Exports + +var types = { + bool: { + check: function (arg) { return typeof (arg) === 'boolean'; } + }, + func: { + check: function (arg) { return typeof (arg) === 'function'; } + }, + string: { + check: function (arg) { return typeof (arg) === 'string'; } + }, + object: { + check: function (arg) { + return typeof (arg) === 'object' && arg !== null; + } + }, + number: { + check: function (arg) { + return typeof (arg) === 'number' && !isNaN(arg); + } + }, + finite: { + check: function (arg) { + return typeof (arg) === 'number' && !isNaN(arg) && isFinite(arg); + } + }, + buffer: { + check: function (arg) { return Buffer.isBuffer(arg); }, + operator: 'Buffer.isBuffer' + }, + array: { + check: function (arg) { return Array.isArray(arg); }, + operator: 'Array.isArray' + }, + stream: { + check: function (arg) { return arg instanceof Stream; }, + operator: 'instanceof', + actual: _getClass + }, + date: { + check: function (arg) { return arg instanceof Date; }, + operator: 'instanceof', + actual: _getClass + }, + regexp: { + check: function (arg) { return arg instanceof RegExp; }, + operator: 'instanceof', + actual: _getClass + }, + uuid: { + check: function (arg) { + return typeof (arg) === 'string' && UUID_REGEXP.test(arg); + }, + operator: 'isUUID' + } +}; + +function _setExports(ndebug) { + var keys = Object.keys(types); + var out; + + /* re-export standard assert */ + if (process.env.NODE_NDEBUG) { + out = noop; + } else { + out = function (arg, msg) { + if (!arg) { + _toss(msg, 'true', arg); + } + }; + } + + /* standard checks */ + keys.forEach(function (k) { + if (ndebug) { + out[k] = noop; + return; + } + var type = types[k]; + out[k] = function (arg, msg) { + if (!type.check(arg)) { + 
_toss(msg, k, type.operator, arg, type.actual); + } + }; + }); + + /* optional checks */ + keys.forEach(function (k) { + var name = 'optional' + _capitalize(k); + if (ndebug) { + out[name] = noop; + return; + } + var type = types[k]; + out[name] = function (arg, msg) { + if (arg === undefined || arg === null) { + return; + } + if (!type.check(arg)) { + _toss(msg, k, type.operator, arg, type.actual); + } + }; + }); + + /* arrayOf checks */ + keys.forEach(function (k) { + var name = 'arrayOf' + _capitalize(k); + if (ndebug) { + out[name] = noop; + return; + } + var type = types[k]; + var expected = '[' + k + ']'; + out[name] = function (arg, msg) { + if (!Array.isArray(arg)) { + _toss(msg, expected, type.operator, arg, type.actual); + } + var i; + for (i = 0; i < arg.length; i++) { + if (!type.check(arg[i])) { + _toss(msg, expected, type.operator, arg, type.actual); + } + } + }; + }); + + /* optionalArrayOf checks */ + keys.forEach(function (k) { + var name = 'optionalArrayOf' + _capitalize(k); + if (ndebug) { + out[name] = noop; + return; + } + var type = types[k]; + var expected = '[' + k + ']'; + out[name] = function (arg, msg) { + if (arg === undefined || arg === null) { + return; + } + if (!Array.isArray(arg)) { + _toss(msg, expected, type.operator, arg, type.actual); + } + var i; + for (i = 0; i < arg.length; i++) { + if (!type.check(arg[i])) { + _toss(msg, expected, type.operator, arg, type.actual); + } + } + }; + }); + + /* re-export built-in assertions */ + Object.keys(assert).forEach(function (k) { + if (k === 'AssertionError') { + out[k] = assert[k]; + return; + } + if (ndebug) { + out[k] = noop; + return; + } + out[k] = assert[k]; + }); + + /* export ourselves (for unit tests _only_) */ + out._setExports = _setExports; + + return out; +} + +module.exports = _setExports(process.env.NODE_NDEBUG); + + +/***/ }), + +/***/ 5181: +/***/ (function(__unused_webpack_module, exports) { + +(function (global, factory) { + true ? 
factory(exports) : + 0; +}(this, (function (exports) { 'use strict'; + + /** + * Creates a continuation function with some arguments already applied. + * + * Useful as a shorthand when combined with other control flow functions. Any + * arguments passed to the returned function are added to the arguments + * originally passed to apply. + * + * @name apply + * @static + * @memberOf module:Utils + * @method + * @category Util + * @param {Function} fn - The function you want to eventually apply all + * arguments to. Invokes with (arguments...). + * @param {...*} arguments... - Any number of arguments to automatically apply + * when the continuation is called. + * @returns {Function} the partially-applied function + * @example + * + * // using apply + * async.parallel([ + * async.apply(fs.writeFile, 'testfile1', 'test1'), + * async.apply(fs.writeFile, 'testfile2', 'test2') + * ]); + * + * + * // the same process without using apply + * async.parallel([ + * function(callback) { + * fs.writeFile('testfile1', 'test1', callback); + * }, + * function(callback) { + * fs.writeFile('testfile2', 'test2', callback); + * } + * ]); + * + * // It's possible to pass any number of additional arguments when calling the + * // continuation: + * + * node> var fn = async.apply(sys.puts, 'one'); + * node> fn('two', 'three'); + * one + * two + * three + */ + function apply(fn, ...args) { + return (...callArgs) => fn(...args,...callArgs); + } + + function initialParams (fn) { + return function (...args/*, callback*/) { + var callback = args.pop(); + return fn.call(this, args, callback); + }; + } + + /* istanbul ignore file */ + + var hasSetImmediate = typeof setImmediate === 'function' && setImmediate; + var hasNextTick = typeof process === 'object' && typeof process.nextTick === 'function'; + + function fallback(fn) { + setTimeout(fn, 0); + } + + function wrap(defer) { + return (fn, ...args) => defer(() => fn(...args)); + } + + var _defer; + + if (hasSetImmediate) { + _defer = 
setImmediate; + } else if (hasNextTick) { + _defer = process.nextTick; + } else { + _defer = fallback; + } + + var setImmediate$1 = wrap(_defer); + + /** + * Take a sync function and make it async, passing its return value to a + * callback. This is useful for plugging sync functions into a waterfall, + * series, or other async functions. Any arguments passed to the generated + * function will be passed to the wrapped function (except for the final + * callback argument). Errors thrown will be passed to the callback. + * + * If the function passed to `asyncify` returns a Promise, that promises's + * resolved/rejected state will be used to call the callback, rather than simply + * the synchronous return value. + * + * This also means you can asyncify ES2017 `async` functions. + * + * @name asyncify + * @static + * @memberOf module:Utils + * @method + * @alias wrapSync + * @category Util + * @param {Function} func - The synchronous function, or Promise-returning + * function to convert to an {@link AsyncFunction}. + * @returns {AsyncFunction} An asynchronous wrapper of the `func`. To be + * invoked with `(args..., callback)`. + * @example + * + * // passing a regular synchronous function + * async.waterfall([ + * async.apply(fs.readFile, filename, "utf8"), + * async.asyncify(JSON.parse), + * function (data, next) { + * // data is the result of parsing the text. + * // If there was a parsing error, it would have been caught. + * } + * ], callback); + * + * // passing a function returning a promise + * async.waterfall([ + * async.apply(fs.readFile, filename, "utf8"), + * async.asyncify(function (contents) { + * return db.model.create(contents); + * }), + * function (model, next) { + * // `model` is the instantiated model object. + * // If there was an error, this function would be skipped. 
+ * } + * ], callback); + * + * // es2017 example, though `asyncify` is not needed if your JS environment + * // supports async functions out of the box + * var q = async.queue(async.asyncify(async function(file) { + * var intermediateStep = await processFile(file); + * return await somePromise(intermediateStep) + * })); + * + * q.push(files); + */ + function asyncify(func) { + if (isAsync(func)) { + return function (...args/*, callback*/) { + const callback = args.pop(); + const promise = func.apply(this, args); + return handlePromise(promise, callback) + } + } + + return initialParams(function (args, callback) { + var result; + try { + result = func.apply(this, args); + } catch (e) { + return callback(e); + } + // if result is Promise object + if (result && typeof result.then === 'function') { + return handlePromise(result, callback) + } else { + callback(null, result); + } + }); + } + + function handlePromise(promise, callback) { + return promise.then(value => { + invokeCallback(callback, null, value); + }, err => { + invokeCallback(callback, err && err.message ? err : new Error(err)); + }); + } + + function invokeCallback(callback, error, value) { + try { + callback(error, value); + } catch (err) { + setImmediate$1(e => { throw e }, err); + } + } + + function isAsync(fn) { + return fn[Symbol.toStringTag] === 'AsyncFunction'; + } + + function isAsyncGenerator(fn) { + return fn[Symbol.toStringTag] === 'AsyncGenerator'; + } + + function isAsyncIterable(obj) { + return typeof obj[Symbol.asyncIterator] === 'function'; + } + + function wrapAsync(asyncFn) { + if (typeof asyncFn !== 'function') throw new Error('expected a function') + return isAsync(asyncFn) ? asyncify(asyncFn) : asyncFn; + } + + // conditionally promisify a function. 
+ // only return a promise if a callback is omitted + function awaitify (asyncFn, arity = asyncFn.length) { + if (!arity) throw new Error('arity is undefined') + function awaitable (...args) { + if (typeof args[arity - 1] === 'function') { + return asyncFn.apply(this, args) + } + + return new Promise((resolve, reject) => { + args[arity - 1] = (err, ...cbArgs) => { + if (err) return reject(err) + resolve(cbArgs.length > 1 ? cbArgs : cbArgs[0]); + }; + asyncFn.apply(this, args); + }) + } + + return awaitable + } + + function applyEach (eachfn) { + return function applyEach(fns, ...callArgs) { + const go = awaitify(function (callback) { + var that = this; + return eachfn(fns, (fn, cb) => { + wrapAsync(fn).apply(that, callArgs.concat(cb)); + }, callback); + }); + return go; + }; + } + + function _asyncMap(eachfn, arr, iteratee, callback) { + arr = arr || []; + var results = []; + var counter = 0; + var _iteratee = wrapAsync(iteratee); + + return eachfn(arr, (value, _, iterCb) => { + var index = counter++; + _iteratee(value, (err, v) => { + results[index] = v; + iterCb(err); + }); + }, err => { + callback(err, results); + }); + } + + function isArrayLike(value) { + return value && + typeof value.length === 'number' && + value.length >= 0 && + value.length % 1 === 0; + } + + // A temporary value used to identify if the loop should be broken. + // See #1064, #1293 + const breakLoop = {}; + + function once(fn) { + function wrapper (...args) { + if (fn === null) return; + var callFn = fn; + fn = null; + callFn.apply(this, args); + } + Object.assign(wrapper, fn); + return wrapper + } + + function getIterator (coll) { + return coll[Symbol.iterator] && coll[Symbol.iterator](); + } + + function createArrayIterator(coll) { + var i = -1; + var len = coll.length; + return function next() { + return ++i < len ? 
{value: coll[i], key: i} : null; + } + } + + function createES2015Iterator(iterator) { + var i = -1; + return function next() { + var item = iterator.next(); + if (item.done) + return null; + i++; + return {value: item.value, key: i}; + } + } + + function createObjectIterator(obj) { + var okeys = obj ? Object.keys(obj) : []; + var i = -1; + var len = okeys.length; + return function next() { + var key = okeys[++i]; + return i < len ? {value: obj[key], key} : null; + }; + } + + function createIterator(coll) { + if (isArrayLike(coll)) { + return createArrayIterator(coll); + } + + var iterator = getIterator(coll); + return iterator ? createES2015Iterator(iterator) : createObjectIterator(coll); + } + + function onlyOnce(fn) { + return function (...args) { + if (fn === null) throw new Error("Callback was already called."); + var callFn = fn; + fn = null; + callFn.apply(this, args); + }; + } + + // for async generators + function asyncEachOfLimit(generator, limit, iteratee, callback) { + let done = false; + let canceled = false; + let awaiting = false; + let running = 0; + let idx = 0; + + function replenish() { + //console.log('replenish') + if (running >= limit || awaiting || done) return + //console.log('replenish awaiting') + awaiting = true; + generator.next().then(({value, done: iterDone}) => { + //console.log('got value', value) + if (canceled || done) return + awaiting = false; + if (iterDone) { + done = true; + if (running <= 0) { + //console.log('done nextCb') + callback(null); + } + return; + } + running++; + iteratee(value, idx, iterateeCallback); + idx++; + replenish(); + }).catch(handleError); + } + + function iterateeCallback(err, result) { + //console.log('iterateeCallback') + running -= 1; + if (canceled) return + if (err) return handleError(err) + + if (err === false) { + done = true; + canceled = true; + return + } + + if (result === breakLoop || (done && running <= 0)) { + done = true; + //console.log('done iterCb') + return callback(null); + } + 
replenish(); + } + + function handleError(err) { + if (canceled) return + awaiting = false; + done = true; + callback(err); + } + + replenish(); + } + + var eachOfLimit = (limit) => { + return (obj, iteratee, callback) => { + callback = once(callback); + if (limit <= 0) { + throw new RangeError('concurrency limit cannot be less than 1') + } + if (!obj) { + return callback(null); + } + if (isAsyncGenerator(obj)) { + return asyncEachOfLimit(obj, limit, iteratee, callback) + } + if (isAsyncIterable(obj)) { + return asyncEachOfLimit(obj[Symbol.asyncIterator](), limit, iteratee, callback) + } + var nextElem = createIterator(obj); + var done = false; + var canceled = false; + var running = 0; + var looping = false; + + function iterateeCallback(err, value) { + if (canceled) return + running -= 1; + if (err) { + done = true; + callback(err); + } + else if (err === false) { + done = true; + canceled = true; + } + else if (value === breakLoop || (done && running <= 0)) { + done = true; + return callback(null); + } + else if (!looping) { + replenish(); + } + } + + function replenish () { + looping = true; + while (running < limit && !done) { + var elem = nextElem(); + if (elem === null) { + done = true; + if (running <= 0) { + callback(null); + } + return; + } + running += 1; + iteratee(elem.value, elem.key, onlyOnce(iterateeCallback)); + } + looping = false; + } + + replenish(); + }; + }; + + /** + * The same as [`eachOf`]{@link module:Collections.eachOf} but runs a maximum of `limit` async operations at a + * time. + * + * @name eachOfLimit + * @static + * @memberOf module:Collections + * @method + * @see [async.eachOf]{@link module:Collections.eachOf} + * @alias forEachOfLimit + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {number} limit - The maximum number of async operations at a time. + * @param {AsyncFunction} iteratee - An async function to apply to each + * item in `coll`. 
The `key` is the item's key, or index in the case of an + * array. + * Invoked with (item, key, callback). + * @param {Function} [callback] - A callback which is called when all + * `iteratee` functions have finished, or an error occurs. Invoked with (err). + * @returns {Promise} a promise, if a callback is omitted + */ + function eachOfLimit$1(coll, limit, iteratee, callback) { + return eachOfLimit(limit)(coll, wrapAsync(iteratee), callback); + } + + var eachOfLimit$2 = awaitify(eachOfLimit$1, 4); + + // eachOf implementation optimized for array-likes + function eachOfArrayLike(coll, iteratee, callback) { + callback = once(callback); + var index = 0, + completed = 0, + {length} = coll, + canceled = false; + if (length === 0) { + callback(null); + } + + function iteratorCallback(err, value) { + if (err === false) { + canceled = true; + } + if (canceled === true) return + if (err) { + callback(err); + } else if ((++completed === length) || value === breakLoop) { + callback(null); + } + } + + for (; index < length; index++) { + iteratee(coll[index], index, onlyOnce(iteratorCallback)); + } + } + + // a generic version of eachOf which can handle array, object, and iterator cases. + function eachOfGeneric (coll, iteratee, callback) { + return eachOfLimit$2(coll, Infinity, iteratee, callback); + } + + /** + * Like [`each`]{@link module:Collections.each}, except that it passes the key (or index) as the second argument + * to the iteratee. + * + * @name eachOf + * @static + * @memberOf module:Collections + * @method + * @alias forEachOf + * @category Collection + * @see [async.each]{@link module:Collections.each} + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - A function to apply to each + * item in `coll`. + * The `key` is the item's key, or index in the case of an array. + * Invoked with (item, key, callback). 
+ * @param {Function} [callback] - A callback which is called when all + * `iteratee` functions have finished, or an error occurs. Invoked with (err). + * @returns {Promise} a promise, if a callback is omitted + * @example + * + * var obj = {dev: "/dev.json", test: "/test.json", prod: "/prod.json"}; + * var configs = {}; + * + * async.forEachOf(obj, function (value, key, callback) { + * fs.readFile(__dirname + value, "utf8", function (err, data) { + * if (err) return callback(err); + * try { + * configs[key] = JSON.parse(data); + * } catch (e) { + * return callback(e); + * } + * callback(); + * }); + * }, function (err) { + * if (err) console.error(err.message); + * // configs is now a map of JSON data + * doSomethingWith(configs); + * }); + */ + function eachOf(coll, iteratee, callback) { + var eachOfImplementation = isArrayLike(coll) ? eachOfArrayLike : eachOfGeneric; + return eachOfImplementation(coll, wrapAsync(iteratee), callback); + } + + var eachOf$1 = awaitify(eachOf, 3); + + /** + * Produces a new collection of values by mapping each value in `coll` through + * the `iteratee` function. The `iteratee` is called with an item from `coll` + * and a callback for when it has finished processing. Each of these callback + * takes 2 arguments: an `error`, and the transformed item from `coll`. If + * `iteratee` passes an error to its callback, the main `callback` (for the + * `map` function) is immediately called with the error. + * + * Note, that since this function applies the `iteratee` to each item in + * parallel, there is no guarantee that the `iteratee` functions will complete + * in order. However, the results array will be in the same order as the + * original `coll`. + * + * If `map` is passed an Object, the results will be an Array. The results + * will roughly be in the order of the original Objects' keys (but this can + * vary across JavaScript engines). 
+ * + * @name map + * @static + * @memberOf module:Collections + * @method + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - An async function to apply to each item in + * `coll`. + * The iteratee should complete with the transformed item. + * Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called when all `iteratee` + * functions have finished, or an error occurs. Results is an Array of the + * transformed items from the `coll`. Invoked with (err, results). + * @returns {Promise} a promise, if no callback is passed + * @example + * + * async.map(['file1','file2','file3'], fs.stat, function(err, results) { + * // results is now an array of stats for each file + * }); + */ + function map (coll, iteratee, callback) { + return _asyncMap(eachOf$1, coll, iteratee, callback) + } + var map$1 = awaitify(map, 3); + + /** + * Applies the provided arguments to each function in the array, calling + * `callback` after all functions have completed. If you only provide the first + * argument, `fns`, then it will return a function which lets you pass in the + * arguments as if it were a single function call. If more arguments are + * provided, `callback` is required while `args` is still optional. The results + * for each of the applied async functions are passed to the final callback + * as an array. + * + * @name applyEach + * @static + * @memberOf module:ControlFlow + * @method + * @category Control Flow + * @param {Array|Iterable|AsyncIterable|Object} fns - A collection of {@link AsyncFunction}s + * to all call with the same arguments + * @param {...*} [args] - any number of separate arguments to pass to the + * function. + * @param {Function} [callback] - the final argument should be the callback, + * called when all functions have completed processing. 
+ * @returns {AsyncFunction} - Returns a function that takes no args other than + * an optional callback, that is the result of applying the `args` to each + * of the functions. + * @example + * + * const appliedFn = async.applyEach([enableSearch, updateSchema], 'bucket') + * + * appliedFn((err, results) => { + * // results[0] is the results for `enableSearch` + * // results[1] is the results for `updateSchema` + * }); + * + * // partial application example: + * async.each( + * buckets, + * async (bucket) => async.applyEach([enableSearch, updateSchema], bucket)(), + * callback + * ); + */ + var applyEach$1 = applyEach(map$1); + + /** + * The same as [`eachOf`]{@link module:Collections.eachOf} but runs only a single async operation at a time. + * + * @name eachOfSeries + * @static + * @memberOf module:Collections + * @method + * @see [async.eachOf]{@link module:Collections.eachOf} + * @alias forEachOfSeries + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - An async function to apply to each item in + * `coll`. + * Invoked with (item, key, callback). + * @param {Function} [callback] - A callback which is called when all `iteratee` + * functions have finished, or an error occurs. Invoked with (err). + * @returns {Promise} a promise, if a callback is omitted + */ + function eachOfSeries(coll, iteratee, callback) { + return eachOfLimit$2(coll, 1, iteratee, callback) + } + var eachOfSeries$1 = awaitify(eachOfSeries, 3); + + /** + * The same as [`map`]{@link module:Collections.map} but runs only a single async operation at a time. + * + * @name mapSeries + * @static + * @memberOf module:Collections + * @method + * @see [async.map]{@link module:Collections.map} + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - An async function to apply to each item in + * `coll`. 
+ * The iteratee should complete with the transformed item. + * Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called when all `iteratee` + * functions have finished, or an error occurs. Results is an array of the + * transformed items from the `coll`. Invoked with (err, results). + * @returns {Promise} a promise, if no callback is passed + */ + function mapSeries (coll, iteratee, callback) { + return _asyncMap(eachOfSeries$1, coll, iteratee, callback) + } + var mapSeries$1 = awaitify(mapSeries, 3); + + /** + * The same as [`applyEach`]{@link module:ControlFlow.applyEach} but runs only a single async operation at a time. + * + * @name applyEachSeries + * @static + * @memberOf module:ControlFlow + * @method + * @see [async.applyEach]{@link module:ControlFlow.applyEach} + * @category Control Flow + * @param {Array|Iterable|AsyncIterable|Object} fns - A collection of {@link AsyncFunction}s to all + * call with the same arguments + * @param {...*} [args] - any number of separate arguments to pass to the + * function. + * @param {Function} [callback] - the final argument should be the callback, + * called when all functions have completed processing. + * @returns {AsyncFunction} - A function, that when called, is the result of + * appling the `args` to the list of functions. It takes no args, other than + * a callback. + */ + var applyEachSeries = applyEach(mapSeries$1); + + const PROMISE_SYMBOL = Symbol('promiseCallback'); + + function promiseCallback () { + let resolve, reject; + function callback (err, ...args) { + if (err) return reject(err) + resolve(args.length > 1 ? args : args[0]); + } + + callback[PROMISE_SYMBOL] = new Promise((res, rej) => { + resolve = res, + reject = rej; + }); + + return callback + } + + /** + * Determines the best order for running the {@link AsyncFunction}s in `tasks`, based on + * their requirements. 
Each function can optionally depend on other functions + * being completed first, and each function is run as soon as its requirements + * are satisfied. + * + * If any of the {@link AsyncFunction}s pass an error to their callback, the `auto` sequence + * will stop. Further tasks will not execute (so any other functions depending + * on it will not run), and the main `callback` is immediately called with the + * error. + * + * {@link AsyncFunction}s also receive an object containing the results of functions which + * have completed so far as the first argument, if they have dependencies. If a + * task function has no dependencies, it will only be passed a callback. + * + * @name auto + * @static + * @memberOf module:ControlFlow + * @method + * @category Control Flow + * @param {Object} tasks - An object. Each of its properties is either a + * function or an array of requirements, with the {@link AsyncFunction} itself the last item + * in the array. The object's key of a property serves as the name of the task + * defined by that property, i.e. can be used when specifying requirements for + * other tasks. The function receives one or two arguments: + * * a `results` object, containing the results of the previously executed + * functions, only passed if the task has any dependencies, + * * a `callback(err, result)` function, which must be called when finished, + * passing an `error` (which can be `null`) and the result of the function's + * execution. + * @param {number} [concurrency=Infinity] - An optional `integer` for + * determining the maximum number of tasks that can be run in parallel. By + * default, as many as possible. + * @param {Function} [callback] - An optional callback which is called when all + * the tasks have been completed. It receives the `err` argument if any `tasks` + * pass an error to their callback. 
Results are always returned; however, if an + * error occurs, no further `tasks` will be performed, and the results object + * will only contain partial results. Invoked with (err, results). + * @returns {Promise} a promise, if a callback is not passed + * @example + * + * async.auto({ + * // this function will just be passed a callback + * readData: async.apply(fs.readFile, 'data.txt', 'utf-8'), + * showData: ['readData', function(results, cb) { + * // results.readData is the file's contents + * // ... + * }] + * }, callback); + * + * async.auto({ + * get_data: function(callback) { + * console.log('in get_data'); + * // async code to get some data + * callback(null, 'data', 'converted to array'); + * }, + * make_folder: function(callback) { + * console.log('in make_folder'); + * // async code to create a directory to store a file in + * // this is run at the same time as getting the data + * callback(null, 'folder'); + * }, + * write_file: ['get_data', 'make_folder', function(results, callback) { + * console.log('in write_file', JSON.stringify(results)); + * // once there is some data and the directory exists, + * // write the data to a file in the directory + * callback(null, 'filename'); + * }], + * email_link: ['write_file', function(results, callback) { + * console.log('in email_link', JSON.stringify(results)); + * // once the file is written let's email a link to it... + * // results.write_file contains the filename returned by write_file. + * callback(null, {'file':results.write_file, 'email':'user@example.com'}); + * }] + * }, function(err, results) { + * console.log('err = ', err); + * console.log('results = ', results); + * }); + */ + function auto(tasks, concurrency, callback) { + if (typeof concurrency !== 'number') { + // concurrency is optional, shift the args. 
+ callback = concurrency; + concurrency = null; + } + callback = once(callback || promiseCallback()); + var numTasks = Object.keys(tasks).length; + if (!numTasks) { + return callback(null); + } + if (!concurrency) { + concurrency = numTasks; + } + + var results = {}; + var runningTasks = 0; + var canceled = false; + var hasError = false; + + var listeners = Object.create(null); + + var readyTasks = []; + + // for cycle detection: + var readyToCheck = []; // tasks that have been identified as reachable + // without the possibility of returning to an ancestor task + var uncheckedDependencies = {}; + + Object.keys(tasks).forEach(key => { + var task = tasks[key]; + if (!Array.isArray(task)) { + // no dependencies + enqueueTask(key, [task]); + readyToCheck.push(key); + return; + } + + var dependencies = task.slice(0, task.length - 1); + var remainingDependencies = dependencies.length; + if (remainingDependencies === 0) { + enqueueTask(key, task); + readyToCheck.push(key); + return; + } + uncheckedDependencies[key] = remainingDependencies; + + dependencies.forEach(dependencyName => { + if (!tasks[dependencyName]) { + throw new Error('async.auto task `' + key + + '` has a non-existent dependency `' + + dependencyName + '` in ' + + dependencies.join(', ')); + } + addListener(dependencyName, () => { + remainingDependencies--; + if (remainingDependencies === 0) { + enqueueTask(key, task); + } + }); + }); + }); + + checkForDeadlocks(); + processQueue(); + + function enqueueTask(key, task) { + readyTasks.push(() => runTask(key, task)); + } + + function processQueue() { + if (canceled) return + if (readyTasks.length === 0 && runningTasks === 0) { + return callback(null, results); + } + while(readyTasks.length && runningTasks < concurrency) { + var run = readyTasks.shift(); + run(); + } + + } + + function addListener(taskName, fn) { + var taskListeners = listeners[taskName]; + if (!taskListeners) { + taskListeners = listeners[taskName] = []; + } + + taskListeners.push(fn); + } + 
+ function taskComplete(taskName) { + var taskListeners = listeners[taskName] || []; + taskListeners.forEach(fn => fn()); + processQueue(); + } + + + function runTask(key, task) { + if (hasError) return; + + var taskCallback = onlyOnce((err, ...result) => { + runningTasks--; + if (err === false) { + canceled = true; + return + } + if (result.length < 2) { + [result] = result; + } + if (err) { + var safeResults = {}; + Object.keys(results).forEach(rkey => { + safeResults[rkey] = results[rkey]; + }); + safeResults[key] = result; + hasError = true; + listeners = Object.create(null); + if (canceled) return + callback(err, safeResults); + } else { + results[key] = result; + taskComplete(key); + } + }); + + runningTasks++; + var taskFn = wrapAsync(task[task.length - 1]); + if (task.length > 1) { + taskFn(results, taskCallback); + } else { + taskFn(taskCallback); + } + } + + function checkForDeadlocks() { + // Kahn's algorithm + // https://en.wikipedia.org/wiki/Topological_sorting#Kahn.27s_algorithm + // http://connalle.blogspot.com/2013/10/topological-sortingkahn-algorithm.html + var currentTask; + var counter = 0; + while (readyToCheck.length) { + currentTask = readyToCheck.pop(); + counter++; + getDependents(currentTask).forEach(dependent => { + if (--uncheckedDependencies[dependent] === 0) { + readyToCheck.push(dependent); + } + }); + } + + if (counter !== numTasks) { + throw new Error( + 'async.auto cannot execute tasks due to a recursive dependency' + ); + } + } + + function getDependents(taskName) { + var result = []; + Object.keys(tasks).forEach(key => { + const task = tasks[key]; + if (Array.isArray(task) && task.indexOf(taskName) >= 0) { + result.push(key); + } + }); + return result; + } + + return callback[PROMISE_SYMBOL] + } + + var FN_ARGS = /^(?:async\s+)?(?:function)?\s*\w*\s*\(\s*([^)]+)\s*\)(?:\s*{)/; + var ARROW_FN_ARGS = /^(?:async\s+)?\(?\s*([^)=]+)\s*\)?(?:\s*=>)/; + var FN_ARG_SPLIT = /,/; + var FN_ARG = /(=.+)?(\s*)$/; + var STRIP_COMMENTS = 
/((\/\/.*$)|(\/\*[\s\S]*?\*\/))/mg; + + function parseParams(func) { + const src = func.toString().replace(STRIP_COMMENTS, ''); + let match = src.match(FN_ARGS); + if (!match) { + match = src.match(ARROW_FN_ARGS); + } + if (!match) throw new Error('could not parse args in autoInject\nSource:\n' + src) + let [, args] = match; + return args + .replace(/\s/g, '') + .split(FN_ARG_SPLIT) + .map((arg) => arg.replace(FN_ARG, '').trim()); + } + + /** + * A dependency-injected version of the [async.auto]{@link module:ControlFlow.auto} function. Dependent + * tasks are specified as parameters to the function, after the usual callback + * parameter, with the parameter names matching the names of the tasks it + * depends on. This can provide even more readable task graphs which can be + * easier to maintain. + * + * If a final callback is specified, the task results are similarly injected, + * specified as named parameters after the initial error parameter. + * + * The autoInject function is purely syntactic sugar and its semantics are + * otherwise equivalent to [async.auto]{@link module:ControlFlow.auto}. + * + * @name autoInject + * @static + * @memberOf module:ControlFlow + * @method + * @see [async.auto]{@link module:ControlFlow.auto} + * @category Control Flow + * @param {Object} tasks - An object, each of whose properties is an {@link AsyncFunction} of + * the form 'func([dependencies...], callback). The object's key of a property + * serves as the name of the task defined by that property, i.e. can be used + * when specifying requirements for other tasks. + * * The `callback` parameter is a `callback(err, result)` which must be called + * when finished, passing an `error` (which can be `null`) and the result of + * the function's execution. The remaining parameters name other tasks on + * which the task is dependent, and the results from those tasks are the + * arguments of those parameters. 
+ * @param {Function} [callback] - An optional callback which is called when all + * the tasks have been completed. It receives the `err` argument if any `tasks` + * pass an error to their callback, and a `results` object with any completed + * task results, similar to `auto`. + * @returns {Promise} a promise, if no callback is passed + * @example + * + * // The example from `auto` can be rewritten as follows: + * async.autoInject({ + * get_data: function(callback) { + * // async code to get some data + * callback(null, 'data', 'converted to array'); + * }, + * make_folder: function(callback) { + * // async code to create a directory to store a file in + * // this is run at the same time as getting the data + * callback(null, 'folder'); + * }, + * write_file: function(get_data, make_folder, callback) { + * // once there is some data and the directory exists, + * // write the data to a file in the directory + * callback(null, 'filename'); + * }, + * email_link: function(write_file, callback) { + * // once the file is written let's email a link to it... + * // write_file contains the filename returned by write_file. + * callback(null, {'file':write_file, 'email':'user@example.com'}); + * } + * }, function(err, results) { + * console.log('err = ', err); + * console.log('email_link = ', results.email_link); + * }); + * + * // If you are using a JS minifier that mangles parameter names, `autoInject` + * // will not work with plain functions, since the parameter names will be + * // collapsed to a single letter identifier. To work around this, you can + * // explicitly specify the names of the parameters your task function needs + * // in an array, similar to Angular.js dependency injection. + * + * // This still has an advantage over plain `auto`, since the results a task + * // depends on are still spread into arguments. + * async.autoInject({ + * //... 
+ * write_file: ['get_data', 'make_folder', function(get_data, make_folder, callback) { + * callback(null, 'filename'); + * }], + * email_link: ['write_file', function(write_file, callback) { + * callback(null, {'file':write_file, 'email':'user@example.com'}); + * }] + * //... + * }, function(err, results) { + * console.log('err = ', err); + * console.log('email_link = ', results.email_link); + * }); + */ + function autoInject(tasks, callback) { + var newTasks = {}; + + Object.keys(tasks).forEach(key => { + var taskFn = tasks[key]; + var params; + var fnIsAsync = isAsync(taskFn); + var hasNoDeps = + (!fnIsAsync && taskFn.length === 1) || + (fnIsAsync && taskFn.length === 0); + + if (Array.isArray(taskFn)) { + params = [...taskFn]; + taskFn = params.pop(); + + newTasks[key] = params.concat(params.length > 0 ? newTask : taskFn); + } else if (hasNoDeps) { + // no dependencies, use the function as-is + newTasks[key] = taskFn; + } else { + params = parseParams(taskFn); + if ((taskFn.length === 0 && !fnIsAsync) && params.length === 0) { + throw new Error("autoInject task functions require explicit parameters."); + } + + // remove callback param + if (!fnIsAsync) params.pop(); + + newTasks[key] = params.concat(newTask); + } + + function newTask(results, taskCb) { + var newArgs = params.map(name => results[name]); + newArgs.push(taskCb); + wrapAsync(taskFn)(...newArgs); + } + }); + + return auto(newTasks, callback); + } + + // Simple doubly linked list (https://en.wikipedia.org/wiki/Doubly_linked_list) implementation + // used for queues. This implementation assumes that the node provided by the user can be modified + // to adjust the next and last properties. We implement only the minimal functionality + // for queue support. 
+ class DLL { + constructor() { + this.head = this.tail = null; + this.length = 0; + } + + removeLink(node) { + if (node.prev) node.prev.next = node.next; + else this.head = node.next; + if (node.next) node.next.prev = node.prev; + else this.tail = node.prev; + + node.prev = node.next = null; + this.length -= 1; + return node; + } + + empty () { + while(this.head) this.shift(); + return this; + } + + insertAfter(node, newNode) { + newNode.prev = node; + newNode.next = node.next; + if (node.next) node.next.prev = newNode; + else this.tail = newNode; + node.next = newNode; + this.length += 1; + } + + insertBefore(node, newNode) { + newNode.prev = node.prev; + newNode.next = node; + if (node.prev) node.prev.next = newNode; + else this.head = newNode; + node.prev = newNode; + this.length += 1; + } + + unshift(node) { + if (this.head) this.insertBefore(this.head, node); + else setInitial(this, node); + } + + push(node) { + if (this.tail) this.insertAfter(this.tail, node); + else setInitial(this, node); + } + + shift() { + return this.head && this.removeLink(this.head); + } + + pop() { + return this.tail && this.removeLink(this.tail); + } + + toArray() { + return [...this] + } + + *[Symbol.iterator] () { + var cur = this.head; + while (cur) { + yield cur.data; + cur = cur.next; + } + } + + remove (testFn) { + var curr = this.head; + while(curr) { + var {next} = curr; + if (testFn(curr)) { + this.removeLink(curr); + } + curr = next; + } + return this; + } + } + + function setInitial(dll, node) { + dll.length = 1; + dll.head = dll.tail = node; + } + + function queue(worker, concurrency, payload) { + if (concurrency == null) { + concurrency = 1; + } + else if(concurrency === 0) { + throw new RangeError('Concurrency must not be zero'); + } + + var _worker = wrapAsync(worker); + var numRunning = 0; + var workersList = []; + const events = { + error: [], + drain: [], + saturated: [], + unsaturated: [], + empty: [] + }; + + function on (event, handler) { + 
events[event].push(handler); + } + + function once (event, handler) { + const handleAndRemove = (...args) => { + off(event, handleAndRemove); + handler(...args); + }; + events[event].push(handleAndRemove); + } + + function off (event, handler) { + if (!event) return Object.keys(events).forEach(ev => events[ev] = []) + if (!handler) return events[event] = [] + events[event] = events[event].filter(ev => ev !== handler); + } + + function trigger (event, ...args) { + events[event].forEach(handler => handler(...args)); + } + + var processingScheduled = false; + function _insert(data, insertAtFront, rejectOnError, callback) { + if (callback != null && typeof callback !== 'function') { + throw new Error('task callback must be a function'); + } + q.started = true; + + var res, rej; + function promiseCallback (err, ...args) { + // we don't care about the error, let the global error handler + // deal with it + if (err) return rejectOnError ? rej(err) : res() + if (args.length <= 1) return res(args[0]) + res(args); + } + + var item = { + data, + callback: rejectOnError ? 
+ promiseCallback : + (callback || promiseCallback) + }; + + if (insertAtFront) { + q._tasks.unshift(item); + } else { + q._tasks.push(item); + } + + if (!processingScheduled) { + processingScheduled = true; + setImmediate$1(() => { + processingScheduled = false; + q.process(); + }); + } + + if (rejectOnError || !callback) { + return new Promise((resolve, reject) => { + res = resolve; + rej = reject; + }) + } + } + + function _createCB(tasks) { + return function (err, ...args) { + numRunning -= 1; + + for (var i = 0, l = tasks.length; i < l; i++) { + var task = tasks[i]; + + var index = workersList.indexOf(task); + if (index === 0) { + workersList.shift(); + } else if (index > 0) { + workersList.splice(index, 1); + } + + task.callback(err, ...args); + + if (err != null) { + trigger('error', err, task.data); + } + } + + if (numRunning <= (q.concurrency - q.buffer) ) { + trigger('unsaturated'); + } + + if (q.idle()) { + trigger('drain'); + } + q.process(); + }; + } + + function _maybeDrain(data) { + if (data.length === 0 && q.idle()) { + // call drain immediately if there are no tasks + setImmediate$1(() => trigger('drain')); + return true + } + return false + } + + const eventMethod = (name) => (handler) => { + if (!handler) { + return new Promise((resolve, reject) => { + once(name, (err, data) => { + if (err) return reject(err) + resolve(data); + }); + }) + } + off(name); + on(name, handler); + + }; + + var isProcessing = false; + var q = { + _tasks: new DLL(), + *[Symbol.iterator] () { + yield* q._tasks[Symbol.iterator](); + }, + concurrency, + payload, + buffer: concurrency / 4, + started: false, + paused: false, + push (data, callback) { + if (Array.isArray(data)) { + if (_maybeDrain(data)) return + return data.map(datum => _insert(datum, false, false, callback)) + } + return _insert(data, false, false, callback); + }, + pushAsync (data, callback) { + if (Array.isArray(data)) { + if (_maybeDrain(data)) return + return data.map(datum => _insert(datum, false, 
true, callback)) + } + return _insert(data, false, true, callback); + }, + kill () { + off(); + q._tasks.empty(); + }, + unshift (data, callback) { + if (Array.isArray(data)) { + if (_maybeDrain(data)) return + return data.map(datum => _insert(datum, true, false, callback)) + } + return _insert(data, true, false, callback); + }, + unshiftAsync (data, callback) { + if (Array.isArray(data)) { + if (_maybeDrain(data)) return + return data.map(datum => _insert(datum, true, true, callback)) + } + return _insert(data, true, true, callback); + }, + remove (testFn) { + q._tasks.remove(testFn); + }, + process () { + // Avoid trying to start too many processing operations. This can occur + // when callbacks resolve synchronously (#1267). + if (isProcessing) { + return; + } + isProcessing = true; + while(!q.paused && numRunning < q.concurrency && q._tasks.length){ + var tasks = [], data = []; + var l = q._tasks.length; + if (q.payload) l = Math.min(l, q.payload); + for (var i = 0; i < l; i++) { + var node = q._tasks.shift(); + tasks.push(node); + workersList.push(node); + data.push(node.data); + } + + numRunning += 1; + + if (q._tasks.length === 0) { + trigger('empty'); + } + + if (numRunning === q.concurrency) { + trigger('saturated'); + } + + var cb = onlyOnce(_createCB(tasks)); + _worker(data, cb); + } + isProcessing = false; + }, + length () { + return q._tasks.length; + }, + running () { + return numRunning; + }, + workersList () { + return workersList; + }, + idle() { + return q._tasks.length + numRunning === 0; + }, + pause () { + q.paused = true; + }, + resume () { + if (q.paused === false) { return; } + q.paused = false; + setImmediate$1(q.process); + } + }; + // define these as fixed properties, so people get useful errors when updating + Object.defineProperties(q, { + saturated: { + writable: false, + value: eventMethod('saturated') + }, + unsaturated: { + writable: false, + value: eventMethod('unsaturated') + }, + empty: { + writable: false, + value: 
eventMethod('empty') + }, + drain: { + writable: false, + value: eventMethod('drain') + }, + error: { + writable: false, + value: eventMethod('error') + }, + }); + return q; + } + + /** + * Creates a `cargo` object with the specified payload. Tasks added to the + * cargo will be processed altogether (up to the `payload` limit). If the + * `worker` is in progress, the task is queued until it becomes available. Once + * the `worker` has completed some tasks, each callback of those tasks is + * called. Check out [these](https://camo.githubusercontent.com/6bbd36f4cf5b35a0f11a96dcd2e97711ffc2fb37/68747470733a2f2f662e636c6f75642e6769746875622e636f6d2f6173736574732f313637363837312f36383130382f62626330636662302d356632392d313165322d393734662d3333393763363464633835382e676966) [animations](https://camo.githubusercontent.com/f4810e00e1c5f5f8addbe3e9f49064fd5d102699/68747470733a2f2f662e636c6f75642e6769746875622e636f6d2f6173736574732f313637363837312f36383130312f38346339323036362d356632392d313165322d383134662d3964336430323431336266642e676966) + * for how `cargo` and `queue` work. + * + * While [`queue`]{@link module:ControlFlow.queue} passes only one task to one of a group of workers + * at a time, cargo passes an array of tasks to a single worker, repeating + * when the worker is finished. + * + * @name cargo + * @static + * @memberOf module:ControlFlow + * @method + * @see [async.queue]{@link module:ControlFlow.queue} + * @category Control Flow + * @param {AsyncFunction} worker - An asynchronous function for processing an array + * of queued tasks. Invoked with `(tasks, callback)`. + * @param {number} [payload=Infinity] - An optional `integer` for determining + * how many tasks should be processed per round; if omitted, the default is + * unlimited. + * @returns {module:ControlFlow.QueueObject} A cargo object to manage the tasks. Callbacks can + * attached as certain properties to listen for specific events during the + * lifecycle of the cargo and inner queue. 
+ * @example + * + * // create a cargo object with payload 2 + * var cargo = async.cargo(function(tasks, callback) { + * for (var i=0; i { + _iteratee(memo, x, (err, v) => { + memo = v; + iterCb(err); + }); + }, err => callback(err, memo)); + } + var reduce$1 = awaitify(reduce, 4); + + /** + * Version of the compose function that is more natural to read. Each function + * consumes the return value of the previous function. It is the equivalent of + * [compose]{@link module:ControlFlow.compose} with the arguments reversed. + * + * Each function is executed with the `this` binding of the composed function. + * + * @name seq + * @static + * @memberOf module:ControlFlow + * @method + * @see [async.compose]{@link module:ControlFlow.compose} + * @category Control Flow + * @param {...AsyncFunction} functions - the asynchronous functions to compose + * @returns {Function} a function that composes the `functions` in order + * @example + * + * // Requires lodash (or underscore), express3 and dresende's orm2. + * // Part of an app, that fetches cats of the logged user. + * // This example uses `seq` function to avoid overnesting and error + * // handling clutter. 
+ * app.get('/cats', function(request, response) { + * var User = request.models.User; + * async.seq( + * _.bind(User.get, User), // 'User.get' has signature (id, callback(err, data)) + * function(user, fn) { + * user.getCats(fn); // 'getCats' has signature (callback(err, data)) + * } + * )(req.session.user_id, function (err, cats) { + * if (err) { + * console.error(err); + * response.json({ status: 'error', message: err.message }); + * } else { + * response.json({ status: 'ok', message: 'Cats found', data: cats }); + * } + * }); + * }); + */ + function seq(...functions) { + var _functions = functions.map(wrapAsync); + return function (...args) { + var that = this; + + var cb = args[args.length - 1]; + if (typeof cb == 'function') { + args.pop(); + } else { + cb = promiseCallback(); + } + + reduce$1(_functions, args, (newargs, fn, iterCb) => { + fn.apply(that, newargs.concat((err, ...nextargs) => { + iterCb(err, nextargs); + })); + }, + (err, results) => cb(err, ...results)); + + return cb[PROMISE_SYMBOL] + }; + } + + /** + * Creates a function which is a composition of the passed asynchronous + * functions. Each function consumes the return value of the function that + * follows. Composing functions `f()`, `g()`, and `h()` would produce the result + * of `f(g(h()))`, only this version uses callbacks to obtain the return values. + * + * If the last argument to the composed function is not a function, a promise + * is returned when you call it. + * + * Each function is executed with the `this` binding of the composed function. 
+ * + * @name compose + * @static + * @memberOf module:ControlFlow + * @method + * @category Control Flow + * @param {...AsyncFunction} functions - the asynchronous functions to compose + * @returns {Function} an asynchronous function that is the composed + * asynchronous `functions` + * @example + * + * function add1(n, callback) { + * setTimeout(function () { + * callback(null, n + 1); + * }, 10); + * } + * + * function mul3(n, callback) { + * setTimeout(function () { + * callback(null, n * 3); + * }, 10); + * } + * + * var add1mul3 = async.compose(mul3, add1); + * add1mul3(4, function (err, result) { + * // result now equals 15 + * }); + */ + function compose(...args) { + return seq(...args.reverse()); + } + + /** + * The same as [`map`]{@link module:Collections.map} but runs a maximum of `limit` async operations at a time. + * + * @name mapLimit + * @static + * @memberOf module:Collections + * @method + * @see [async.map]{@link module:Collections.map} + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {number} limit - The maximum number of async operations at a time. + * @param {AsyncFunction} iteratee - An async function to apply to each item in + * `coll`. + * The iteratee should complete with the transformed item. + * Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called when all `iteratee` + * functions have finished, or an error occurs. Results is an array of the + * transformed items from the `coll`. Invoked with (err, results). + * @returns {Promise} a promise, if no callback is passed + */ + function mapLimit (coll, limit, iteratee, callback) { + return _asyncMap(eachOfLimit(limit), coll, iteratee, callback) + } + var mapLimit$1 = awaitify(mapLimit, 4); + + /** + * The same as [`concat`]{@link module:Collections.concat} but runs a maximum of `limit` async operations at a time. 
+ * + * @name concatLimit + * @static + * @memberOf module:Collections + * @method + * @see [async.concat]{@link module:Collections.concat} + * @category Collection + * @alias flatMapLimit + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {number} limit - The maximum number of async operations at a time. + * @param {AsyncFunction} iteratee - A function to apply to each item in `coll`, + * which should use an array as its result. Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called after all the + * `iteratee` functions have finished, or an error occurs. Results is an array + * containing the concatenated results of the `iteratee` function. Invoked with + * (err, results). + * @returns A Promise, if no callback is passed + */ + function concatLimit(coll, limit, iteratee, callback) { + var _iteratee = wrapAsync(iteratee); + return mapLimit$1(coll, limit, (val, iterCb) => { + _iteratee(val, (err, ...args) => { + if (err) return iterCb(err); + return iterCb(err, args); + }); + }, (err, mapResults) => { + var result = []; + for (var i = 0; i < mapResults.length; i++) { + if (mapResults[i]) { + result = result.concat(...mapResults[i]); + } + } + + return callback(err, result); + }); + } + var concatLimit$1 = awaitify(concatLimit, 4); + + /** + * Applies `iteratee` to each item in `coll`, concatenating the results. Returns + * the concatenated list. The `iteratee`s are called in parallel, and the + * results are concatenated as they return. The results array will be returned in + * the original order of `coll` passed to the `iteratee` function. + * + * @name concat + * @static + * @memberOf module:Collections + * @method + * @category Collection + * @alias flatMap + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - A function to apply to each item in `coll`, + * which should use an array as its result. 
Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called after all the + * `iteratee` functions have finished, or an error occurs. Results is an array + * containing the concatenated results of the `iteratee` function. Invoked with + * (err, results). + * @returns A Promise, if no callback is passed + * @example + * + * async.concat(['dir1','dir2','dir3'], fs.readdir, function(err, files) { + * // files is now a list of filenames that exist in the 3 directories + * }); + */ + function concat(coll, iteratee, callback) { + return concatLimit$1(coll, Infinity, iteratee, callback) + } + var concat$1 = awaitify(concat, 3); + + /** + * The same as [`concat`]{@link module:Collections.concat} but runs only a single async operation at a time. + * + * @name concatSeries + * @static + * @memberOf module:Collections + * @method + * @see [async.concat]{@link module:Collections.concat} + * @category Collection + * @alias flatMapSeries + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - A function to apply to each item in `coll`. + * The iteratee should complete with an array an array of results. + * Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called after all the + * `iteratee` functions have finished, or an error occurs. Results is an array + * containing the concatenated results of the `iteratee` function. Invoked with + * (err, results). + * @returns A Promise, if no callback is passed + */ + function concatSeries(coll, iteratee, callback) { + return concatLimit$1(coll, 1, iteratee, callback) + } + var concatSeries$1 = awaitify(concatSeries, 3); + + /** + * Returns a function that when called, calls-back with the values provided. + * Useful as the first function in a [`waterfall`]{@link module:ControlFlow.waterfall}, or for plugging values in to + * [`auto`]{@link module:ControlFlow.auto}. 
+ * + * @name constant + * @static + * @memberOf module:Utils + * @method + * @category Util + * @param {...*} arguments... - Any number of arguments to automatically invoke + * callback with. + * @returns {AsyncFunction} Returns a function that when invoked, automatically + * invokes the callback with the previous given arguments. + * @example + * + * async.waterfall([ + * async.constant(42), + * function (value, next) { + * // value === 42 + * }, + * //... + * ], callback); + * + * async.waterfall([ + * async.constant(filename, "utf8"), + * fs.readFile, + * function (fileData, next) { + * //... + * } + * //... + * ], callback); + * + * async.auto({ + * hostname: async.constant("https://server.net/"), + * port: findFreePort, + * launchServer: ["hostname", "port", function (options, cb) { + * startServer(options, cb); + * }], + * //... + * }, callback); + */ + function constant(...args) { + return function (...ignoredArgs/*, callback*/) { + var callback = ignoredArgs.pop(); + return callback(null, ...args); + }; + } + + function _createTester(check, getResult) { + return (eachfn, arr, _iteratee, cb) => { + var testPassed = false; + var testResult; + const iteratee = wrapAsync(_iteratee); + eachfn(arr, (value, _, callback) => { + iteratee(value, (err, result) => { + if (err || err === false) return callback(err); + + if (check(result) && !testResult) { + testPassed = true; + testResult = getResult(true, value); + return callback(null, breakLoop); + } + callback(); + }); + }, err => { + if (err) return cb(err); + cb(null, testPassed ? testResult : getResult(false)); + }); + }; + } + + /** + * Returns the first value in `coll` that passes an async truth test. The + * `iteratee` is applied in parallel, meaning the first iteratee to return + * `true` will fire the detect `callback` with that result. That means the + * result might not be the first item in the original `coll` (in terms of order) + * that passes the test. 
+ + * If order within the original `coll` is important, then look at + * [`detectSeries`]{@link module:Collections.detectSeries}. + * + * @name detect + * @static + * @memberOf module:Collections + * @method + * @alias find + * @category Collections + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - A truth test to apply to each item in `coll`. + * The iteratee must complete with a boolean value as its result. + * Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called as soon as any + * iteratee returns `true`, or after all the `iteratee` functions have finished. + * Result will be the first item in the array that passes the truth test + * (iteratee) or the value `undefined` if none passed. Invoked with + * (err, result). + * @returns A Promise, if no callback is passed + * @example + * + * async.detect(['file1','file2','file3'], function(filePath, callback) { + * fs.access(filePath, function(err) { + * callback(null, !err) + * }); + * }, function(err, result) { + * // result now equals the first file in the list that exists + * }); + */ + function detect(coll, iteratee, callback) { + return _createTester(bool => bool, (res, item) => item)(eachOf$1, coll, iteratee, callback) + } + var detect$1 = awaitify(detect, 3); + + /** + * The same as [`detect`]{@link module:Collections.detect} but runs a maximum of `limit` async operations at a + * time. + * + * @name detectLimit + * @static + * @memberOf module:Collections + * @method + * @see [async.detect]{@link module:Collections.detect} + * @alias findLimit + * @category Collections + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {number} limit - The maximum number of async operations at a time. + * @param {AsyncFunction} iteratee - A truth test to apply to each item in `coll`. + * The iteratee must complete with a boolean value as its result. 
+ * Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called as soon as any + * iteratee returns `true`, or after all the `iteratee` functions have finished. + * Result will be the first item in the array that passes the truth test + * (iteratee) or the value `undefined` if none passed. Invoked with + * (err, result). + * @returns a Promise if no callback is passed + */ + function detectLimit(coll, limit, iteratee, callback) { + return _createTester(bool => bool, (res, item) => item)(eachOfLimit(limit), coll, iteratee, callback) + } + var detectLimit$1 = awaitify(detectLimit, 4); + + /** + * The same as [`detect`]{@link module:Collections.detect} but runs only a single async operation at a time. + * + * @name detectSeries + * @static + * @memberOf module:Collections + * @method + * @see [async.detect]{@link module:Collections.detect} + * @alias findSeries + * @category Collections + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - A truth test to apply to each item in `coll`. + * The iteratee must complete with a boolean value as its result. + * Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called as soon as any + * iteratee returns `true`, or after all the `iteratee` functions have finished. + * Result will be the first item in the array that passes the truth test + * (iteratee) or the value `undefined` if none passed. Invoked with + * (err, result). 
+ * @returns a Promise if no callback is passed + */ + function detectSeries(coll, iteratee, callback) { + return _createTester(bool => bool, (res, item) => item)(eachOfLimit(1), coll, iteratee, callback) + } + + var detectSeries$1 = awaitify(detectSeries, 3); + + function consoleFunc(name) { + return (fn, ...args) => wrapAsync(fn)(...args, (err, ...resultArgs) => { + if (typeof console === 'object') { + if (err) { + if (console.error) { + console.error(err); + } + } else if (console[name]) { + resultArgs.forEach(x => console[name](x)); + } + } + }) + } + + /** + * Logs the result of an [`async` function]{@link AsyncFunction} to the + * `console` using `console.dir` to display the properties of the resulting object. + * Only works in Node.js or in browsers that support `console.dir` and + * `console.error` (such as FF and Chrome). + * If multiple arguments are returned from the async function, + * `console.dir` is called on each argument in order. + * + * @name dir + * @static + * @memberOf module:Utils + * @method + * @category Util + * @param {AsyncFunction} function - The function you want to eventually apply + * all arguments to. + * @param {...*} arguments... - Any number of arguments to apply to the function. + * @example + * + * // in a module + * var hello = function(name, callback) { + * setTimeout(function() { + * callback(null, {hello: name}); + * }, 1000); + * }; + * + * // in the node repl + * node> async.dir(hello, 'world'); + * {hello: 'world'} + */ + var dir = consoleFunc('dir'); + + /** + * The post-check version of [`whilst`]{@link module:ControlFlow.whilst}. To reflect the difference in + * the order of operations, the arguments `test` and `iteratee` are switched. + * + * `doWhilst` is to `whilst` as `do while` is to `while` in plain JavaScript. 
+ * + * @name doWhilst + * @static + * @memberOf module:ControlFlow + * @method + * @see [async.whilst]{@link module:ControlFlow.whilst} + * @category Control Flow + * @param {AsyncFunction} iteratee - A function which is called each time `test` + * passes. Invoked with (callback). + * @param {AsyncFunction} test - asynchronous truth test to perform after each + * execution of `iteratee`. Invoked with (...args, callback), where `...args` are the + * non-error args from the previous callback of `iteratee`. + * @param {Function} [callback] - A callback which is called after the test + * function has failed and repeated execution of `iteratee` has stopped. + * `callback` will be passed an error and any arguments passed to the final + * `iteratee`'s callback. Invoked with (err, [results]); + * @returns {Promise} a promise, if no callback is passed + */ + function doWhilst(iteratee, test, callback) { + callback = onlyOnce(callback); + var _fn = wrapAsync(iteratee); + var _test = wrapAsync(test); + var results; + + function next(err, ...args) { + if (err) return callback(err); + if (err === false) return; + results = args; + _test(...args, check); + } + + function check(err, truth) { + if (err) return callback(err); + if (err === false) return; + if (!truth) return callback(null, ...results); + _fn(next); + } + + return check(null, true); + } + + var doWhilst$1 = awaitify(doWhilst, 3); + + /** + * Like ['doWhilst']{@link module:ControlFlow.doWhilst}, except the `test` is inverted. Note the + * argument ordering differs from `until`. + * + * @name doUntil + * @static + * @memberOf module:ControlFlow + * @method + * @see [async.doWhilst]{@link module:ControlFlow.doWhilst} + * @category Control Flow + * @param {AsyncFunction} iteratee - An async function which is called each time + * `test` fails. Invoked with (callback). + * @param {AsyncFunction} test - asynchronous truth test to perform after each + * execution of `iteratee`. 
Invoked with (...args, callback), where `...args` are the + * non-error args from the previous callback of `iteratee` + * @param {Function} [callback] - A callback which is called after the test + * function has passed and repeated execution of `iteratee` has stopped. `callback` + * will be passed an error and any arguments passed to the final `iteratee`'s + * callback. Invoked with (err, [results]); + * @returns {Promise} a promise, if no callback is passed + */ + function doUntil(iteratee, test, callback) { + const _test = wrapAsync(test); + return doWhilst$1(iteratee, (...args) => { + const cb = args.pop(); + _test(...args, (err, truth) => cb (err, !truth)); + }, callback); + } + + function _withoutIndex(iteratee) { + return (value, index, callback) => iteratee(value, callback); + } + + /** + * Applies the function `iteratee` to each item in `coll`, in parallel. + * The `iteratee` is called with an item from the list, and a callback for when + * it has finished. If the `iteratee` passes an error to its `callback`, the + * main `callback` (for the `each` function) is immediately called with the + * error. + * + * Note, that since this function applies `iteratee` to each item in parallel, + * there is no guarantee that the iteratee functions will complete in order. + * + * @name each + * @static + * @memberOf module:Collections + * @method + * @alias forEach + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - An async function to apply to + * each item in `coll`. Invoked with (item, callback). + * The array index is not passed to the iteratee. + * If you need the index, use `eachOf`. + * @param {Function} [callback] - A callback which is called when all + * `iteratee` functions have finished, or an error occurs. Invoked with (err). 
+ * @returns {Promise} a promise, if a callback is omitted + * @example + * + * // assuming openFiles is an array of file names and saveFile is a function + * // to save the modified contents of that file: + * + * async.each(openFiles, saveFile, function(err){ + * // if any of the saves produced an error, err would equal that error + * }); + * + * // assuming openFiles is an array of file names + * async.each(openFiles, function(file, callback) { + * + * // Perform operation on file here. + * console.log('Processing file ' + file); + * + * if( file.length > 32 ) { + * console.log('This file name is too long'); + * callback('File name too long'); + * } else { + * // Do work to process file here + * console.log('File processed'); + * callback(); + * } + * }, function(err) { + * // if any of the file processing produced an error, err would equal that error + * if( err ) { + * // One of the iterations produced an error. + * // All processing will now stop. + * console.log('A file failed to process'); + * } else { + * console.log('All files have been processed successfully'); + * } + * }); + */ + function eachLimit(coll, iteratee, callback) { + return eachOf$1(coll, _withoutIndex(wrapAsync(iteratee)), callback); + } + + var each = awaitify(eachLimit, 3); + + /** + * The same as [`each`]{@link module:Collections.each} but runs a maximum of `limit` async operations at a time. + * + * @name eachLimit + * @static + * @memberOf module:Collections + * @method + * @see [async.each]{@link module:Collections.each} + * @alias forEachLimit + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {number} limit - The maximum number of async operations at a time. + * @param {AsyncFunction} iteratee - An async function to apply to each item in + * `coll`. + * The array index is not passed to the iteratee. + * If you need the index, use `eachOfLimit`. + * Invoked with (item, callback). 
+ * @param {Function} [callback] - A callback which is called when all + * `iteratee` functions have finished, or an error occurs. Invoked with (err). + * @returns {Promise} a promise, if a callback is omitted + */ + function eachLimit$1(coll, limit, iteratee, callback) { + return eachOfLimit(limit)(coll, _withoutIndex(wrapAsync(iteratee)), callback); + } + var eachLimit$2 = awaitify(eachLimit$1, 4); + + /** + * The same as [`each`]{@link module:Collections.each} but runs only a single async operation at a time. + * + * Note, that unlike [`each`]{@link module:Collections.each}, this function applies iteratee to each item + * in series and therefore the iteratee functions will complete in order. + + * @name eachSeries + * @static + * @memberOf module:Collections + * @method + * @see [async.each]{@link module:Collections.each} + * @alias forEachSeries + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - An async function to apply to each + * item in `coll`. + * The array index is not passed to the iteratee. + * If you need the index, use `eachOfSeries`. + * Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called when all + * `iteratee` functions have finished, or an error occurs. Invoked with (err). + * @returns {Promise} a promise, if a callback is omitted + */ + function eachSeries(coll, iteratee, callback) { + return eachLimit$2(coll, 1, iteratee, callback) + } + var eachSeries$1 = awaitify(eachSeries, 3); + + /** + * Wrap an async function and ensure it calls its callback on a later tick of + * the event loop. If the function already calls its callback on a next tick, + * no extra deferral is added. This is useful for preventing stack overflows + * (`RangeError: Maximum call stack size exceeded`) and generally keeping + * [Zalgo](http://blog.izs.me/post/59142742143/designing-apis-for-asynchrony) + * contained. 
ES2017 `async` functions are returned as-is -- they are immune + * to Zalgo's corrupting influences, as they always resolve on a later tick. + * + * @name ensureAsync + * @static + * @memberOf module:Utils + * @method + * @category Util + * @param {AsyncFunction} fn - an async function, one that expects a node-style + * callback as its last argument. + * @returns {AsyncFunction} Returns a wrapped function with the exact same call + * signature as the function passed in. + * @example + * + * function sometimesAsync(arg, callback) { + * if (cache[arg]) { + * return callback(null, cache[arg]); // this would be synchronous!! + * } else { + * doSomeIO(arg, callback); // this IO would be asynchronous + * } + * } + * + * // this has a risk of stack overflows if many results are cached in a row + * async.mapSeries(args, sometimesAsync, done); + * + * // this will defer sometimesAsync's callback if necessary, + * // preventing stack overflows + * async.mapSeries(args, async.ensureAsync(sometimesAsync), done); + */ + function ensureAsync(fn) { + if (isAsync(fn)) return fn; + return function (...args/*, callback*/) { + var callback = args.pop(); + var sync = true; + args.push((...innerArgs) => { + if (sync) { + setImmediate$1(() => callback(...innerArgs)); + } else { + callback(...innerArgs); + } + }); + fn.apply(this, args); + sync = false; + }; + } + + /** + * Returns `true` if every element in `coll` satisfies an async test. If any + * iteratee call returns `false`, the main `callback` is immediately called. + * + * @name every + * @static + * @memberOf module:Collections + * @method + * @alias all + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - An async truth test to apply to each item + * in the collection in parallel. + * The iteratee must complete with a boolean result value. + * Invoked with (item, callback). 
+ * @param {Function} [callback] - A callback which is called after all the + * `iteratee` functions have finished. Result will be either `true` or `false` + * depending on the values of the async tests. Invoked with (err, result). + * @returns {Promise} a promise, if no callback provided + * @example + * + * async.every(['file1','file2','file3'], function(filePath, callback) { + * fs.access(filePath, function(err) { + * callback(null, !err) + * }); + * }, function(err, result) { + * // if result is true then every file exists + * }); + */ + function every(coll, iteratee, callback) { + return _createTester(bool => !bool, res => !res)(eachOf$1, coll, iteratee, callback) + } + var every$1 = awaitify(every, 3); + + /** + * The same as [`every`]{@link module:Collections.every} but runs a maximum of `limit` async operations at a time. + * + * @name everyLimit + * @static + * @memberOf module:Collections + * @method + * @see [async.every]{@link module:Collections.every} + * @alias allLimit + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {number} limit - The maximum number of async operations at a time. + * @param {AsyncFunction} iteratee - An async truth test to apply to each item + * in the collection in parallel. + * The iteratee must complete with a boolean result value. + * Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called after all the + * `iteratee` functions have finished. Result will be either `true` or `false` + * depending on the values of the async tests. Invoked with (err, result). 
+ * @returns {Promise} a promise, if no callback provided + */ + function everyLimit(coll, limit, iteratee, callback) { + return _createTester(bool => !bool, res => !res)(eachOfLimit(limit), coll, iteratee, callback) + } + var everyLimit$1 = awaitify(everyLimit, 4); + + /** + * The same as [`every`]{@link module:Collections.every} but runs only a single async operation at a time. + * + * @name everySeries + * @static + * @memberOf module:Collections + * @method + * @see [async.every]{@link module:Collections.every} + * @alias allSeries + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - An async truth test to apply to each item + * in the collection in series. + * The iteratee must complete with a boolean result value. + * Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called after all the + * `iteratee` functions have finished. Result will be either `true` or `false` + * depending on the values of the async tests. Invoked with (err, result). 
+ * @returns {Promise} a promise, if no callback provided + */ + function everySeries(coll, iteratee, callback) { + return _createTester(bool => !bool, res => !res)(eachOfSeries$1, coll, iteratee, callback) + } + var everySeries$1 = awaitify(everySeries, 3); + + function filterArray(eachfn, arr, iteratee, callback) { + var truthValues = new Array(arr.length); + eachfn(arr, (x, index, iterCb) => { + iteratee(x, (err, v) => { + truthValues[index] = !!v; + iterCb(err); + }); + }, err => { + if (err) return callback(err); + var results = []; + for (var i = 0; i < arr.length; i++) { + if (truthValues[i]) results.push(arr[i]); + } + callback(null, results); + }); + } + + function filterGeneric(eachfn, coll, iteratee, callback) { + var results = []; + eachfn(coll, (x, index, iterCb) => { + iteratee(x, (err, v) => { + if (err) return iterCb(err); + if (v) { + results.push({index, value: x}); + } + iterCb(err); + }); + }, err => { + if (err) return callback(err); + callback(null, results + .sort((a, b) => a.index - b.index) + .map(v => v.value)); + }); + } + + function _filter(eachfn, coll, iteratee, callback) { + var filter = isArrayLike(coll) ? filterArray : filterGeneric; + return filter(eachfn, coll, wrapAsync(iteratee), callback); + } + + /** + * Returns a new array of all the values in `coll` which pass an async truth + * test. This operation is performed in parallel, but the results array will be + * in the same order as the original. + * + * @name filter + * @static + * @memberOf module:Collections + * @method + * @alias select + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {Function} iteratee - A truth test to apply to each item in `coll`. + * The `iteratee` is passed a `callback(err, truthValue)`, which must be called + * with a boolean argument once it has completed. Invoked with (item, callback). 
+ * @param {Function} [callback] - A callback which is called after all the + * `iteratee` functions have finished. Invoked with (err, results). + * @returns {Promise} a promise, if no callback provided + * @example + * + * async.filter(['file1','file2','file3'], function(filePath, callback) { + * fs.access(filePath, function(err) { + * callback(null, !err) + * }); + * }, function(err, results) { + * // results now equals an array of the existing files + * }); + */ + function filter (coll, iteratee, callback) { + return _filter(eachOf$1, coll, iteratee, callback) + } + var filter$1 = awaitify(filter, 3); + + /** + * The same as [`filter`]{@link module:Collections.filter} but runs a maximum of `limit` async operations at a + * time. + * + * @name filterLimit + * @static + * @memberOf module:Collections + * @method + * @see [async.filter]{@link module:Collections.filter} + * @alias selectLimit + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {number} limit - The maximum number of async operations at a time. + * @param {Function} iteratee - A truth test to apply to each item in `coll`. + * The `iteratee` is passed a `callback(err, truthValue)`, which must be called + * with a boolean argument once it has completed. Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called after all the + * `iteratee` functions have finished. Invoked with (err, results). + * @returns {Promise} a promise, if no callback provided + */ + function filterLimit (coll, limit, iteratee, callback) { + return _filter(eachOfLimit(limit), coll, iteratee, callback) + } + var filterLimit$1 = awaitify(filterLimit, 4); + + /** + * The same as [`filter`]{@link module:Collections.filter} but runs only a single async operation at a time. 
+ * + * @name filterSeries + * @static + * @memberOf module:Collections + * @method + * @see [async.filter]{@link module:Collections.filter} + * @alias selectSeries + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {Function} iteratee - A truth test to apply to each item in `coll`. + * The `iteratee` is passed a `callback(err, truthValue)`, which must be called + * with a boolean argument once it has completed. Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called after all the + * `iteratee` functions have finished. Invoked with (err, results) + * @returns {Promise} a promise, if no callback provided + */ + function filterSeries (coll, iteratee, callback) { + return _filter(eachOfSeries$1, coll, iteratee, callback) + } + var filterSeries$1 = awaitify(filterSeries, 3); + + /** + * Calls the asynchronous function `fn` with a callback parameter that allows it + * to call itself again, in series, indefinitely. + + * If an error is passed to the callback then `errback` is called with the + * error, and execution stops, otherwise it will never be called. + * + * @name forever + * @static + * @memberOf module:ControlFlow + * @method + * @category Control Flow + * @param {AsyncFunction} fn - an async function to call repeatedly. + * Invoked with (next). + * @param {Function} [errback] - when `fn` passes an error to it's callback, + * this function will be called, and execution stops. Invoked with (err). + * @returns {Promise} a promise that rejects if an error occurs and an errback + * is not passed + * @example + * + * async.forever( + * function(next) { + * // next is suitable for passing to things that need a callback(err [, whatever]); + * // it will result in this function being called again. + * }, + * function(err) { + * // if next is called with a value in its first parameter, it will appear + * // in here as 'err', and execution will stop. 
+ * } + * ); + */ + function forever(fn, errback) { + var done = onlyOnce(errback); + var task = wrapAsync(ensureAsync(fn)); + + function next(err) { + if (err) return done(err); + if (err === false) return; + task(next); + } + return next(); + } + var forever$1 = awaitify(forever, 2); + + /** + * The same as [`groupBy`]{@link module:Collections.groupBy} but runs a maximum of `limit` async operations at a time. + * + * @name groupByLimit + * @static + * @memberOf module:Collections + * @method + * @see [async.groupBy]{@link module:Collections.groupBy} + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {number} limit - The maximum number of async operations at a time. + * @param {AsyncFunction} iteratee - An async function to apply to each item in + * `coll`. + * The iteratee should complete with a `key` to group the value under. + * Invoked with (value, callback). + * @param {Function} [callback] - A callback which is called when all `iteratee` + * functions have finished, or an error occurs. Result is an `Object` whoses + * properties are arrays of values which returned the corresponding key. 
+ * @returns {Promise} a promise, if no callback is passed + */ + function groupByLimit(coll, limit, iteratee, callback) { + var _iteratee = wrapAsync(iteratee); + return mapLimit$1(coll, limit, (val, iterCb) => { + _iteratee(val, (err, key) => { + if (err) return iterCb(err); + return iterCb(err, {key, val}); + }); + }, (err, mapResults) => { + var result = {}; + // from MDN, handle object having an `hasOwnProperty` prop + var {hasOwnProperty} = Object.prototype; + + for (var i = 0; i < mapResults.length; i++) { + if (mapResults[i]) { + var {key} = mapResults[i]; + var {val} = mapResults[i]; + + if (hasOwnProperty.call(result, key)) { + result[key].push(val); + } else { + result[key] = [val]; + } + } + } + + return callback(err, result); + }); + } + + var groupByLimit$1 = awaitify(groupByLimit, 4); + + /** + * Returns a new object, where each value corresponds to an array of items, from + * `coll`, that returned the corresponding key. That is, the keys of the object + * correspond to the values passed to the `iteratee` callback. + * + * Note: Since this function applies the `iteratee` to each item in parallel, + * there is no guarantee that the `iteratee` functions will complete in order. + * However, the values for each key in the `result` will be in the same order as + * the original `coll`. For Objects, the values will roughly be in the order of + * the original Objects' keys (but this can vary across JavaScript engines). + * + * @name groupBy + * @static + * @memberOf module:Collections + * @method + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - An async function to apply to each item in + * `coll`. + * The iteratee should complete with a `key` to group the value under. + * Invoked with (value, callback). + * @param {Function} [callback] - A callback which is called when all `iteratee` + * functions have finished, or an error occurs. 
Result is an `Object` whoses + * properties are arrays of values which returned the corresponding key. + * @returns {Promise} a promise, if no callback is passed + * @example + * + * async.groupBy(['userId1', 'userId2', 'userId3'], function(userId, callback) { + * db.findById(userId, function(err, user) { + * if (err) return callback(err); + * return callback(null, user.age); + * }); + * }, function(err, result) { + * // result is object containing the userIds grouped by age + * // e.g. { 30: ['userId1', 'userId3'], 42: ['userId2']}; + * }); + */ + function groupBy (coll, iteratee, callback) { + return groupByLimit$1(coll, Infinity, iteratee, callback) + } + + /** + * The same as [`groupBy`]{@link module:Collections.groupBy} but runs only a single async operation at a time. + * + * @name groupBySeries + * @static + * @memberOf module:Collections + * @method + * @see [async.groupBy]{@link module:Collections.groupBy} + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - An async function to apply to each item in + * `coll`. + * The iteratee should complete with a `key` to group the value under. + * Invoked with (value, callback). + * @param {Function} [callback] - A callback which is called when all `iteratee` + * functions have finished, or an error occurs. Result is an `Object` whoses + * properties are arrays of values which returned the corresponding key. + * @returns {Promise} a promise, if no callback is passed + */ + function groupBySeries (coll, iteratee, callback) { + return groupByLimit$1(coll, 1, iteratee, callback) + } + + /** + * Logs the result of an `async` function to the `console`. Only works in + * Node.js or in browsers that support `console.log` and `console.error` (such + * as FF and Chrome). If multiple arguments are returned from the async + * function, `console.log` is called on each argument in order. 
+ * + * @name log + * @static + * @memberOf module:Utils + * @method + * @category Util + * @param {AsyncFunction} function - The function you want to eventually apply + * all arguments to. + * @param {...*} arguments... - Any number of arguments to apply to the function. + * @example + * + * // in a module + * var hello = function(name, callback) { + * setTimeout(function() { + * callback(null, 'hello ' + name); + * }, 1000); + * }; + * + * // in the node repl + * node> async.log(hello, 'world'); + * 'hello world' + */ + var log = consoleFunc('log'); + + /** + * The same as [`mapValues`]{@link module:Collections.mapValues} but runs a maximum of `limit` async operations at a + * time. + * + * @name mapValuesLimit + * @static + * @memberOf module:Collections + * @method + * @see [async.mapValues]{@link module:Collections.mapValues} + * @category Collection + * @param {Object} obj - A collection to iterate over. + * @param {number} limit - The maximum number of async operations at a time. + * @param {AsyncFunction} iteratee - A function to apply to each value and key + * in `coll`. + * The iteratee should complete with the transformed value as its result. + * Invoked with (value, key, callback). + * @param {Function} [callback] - A callback which is called when all `iteratee` + * functions have finished, or an error occurs. `result` is a new object consisting + * of each key from `obj`, with each transformed value on the right-hand side. + * Invoked with (err, result). 
+ * @returns {Promise} a promise, if no callback is passed + */ + function mapValuesLimit(obj, limit, iteratee, callback) { + callback = once(callback); + var newObj = {}; + var _iteratee = wrapAsync(iteratee); + return eachOfLimit(limit)(obj, (val, key, next) => { + _iteratee(val, key, (err, result) => { + if (err) return next(err); + newObj[key] = result; + next(err); + }); + }, err => callback(err, newObj)); + } + + var mapValuesLimit$1 = awaitify(mapValuesLimit, 4); + + /** + * A relative of [`map`]{@link module:Collections.map}, designed for use with objects. + * + * Produces a new Object by mapping each value of `obj` through the `iteratee` + * function. The `iteratee` is called each `value` and `key` from `obj` and a + * callback for when it has finished processing. Each of these callbacks takes + * two arguments: an `error`, and the transformed item from `obj`. If `iteratee` + * passes an error to its callback, the main `callback` (for the `mapValues` + * function) is immediately called with the error. + * + * Note, the order of the keys in the result is not guaranteed. The keys will + * be roughly in the order they complete, (but this is very engine-specific) + * + * @name mapValues + * @static + * @memberOf module:Collections + * @method + * @category Collection + * @param {Object} obj - A collection to iterate over. + * @param {AsyncFunction} iteratee - A function to apply to each value and key + * in `coll`. + * The iteratee should complete with the transformed value as its result. + * Invoked with (value, key, callback). + * @param {Function} [callback] - A callback which is called when all `iteratee` + * functions have finished, or an error occurs. `result` is a new object consisting + * of each key from `obj`, with each transformed value on the right-hand side. + * Invoked with (err, result). 
+ * @returns {Promise} a promise, if no callback is passed + * @example + * + * async.mapValues({ + * f1: 'file1', + * f2: 'file2', + * f3: 'file3' + * }, function (file, key, callback) { + * fs.stat(file, callback); + * }, function(err, result) { + * // result is now a map of stats for each file, e.g. + * // { + * // f1: [stats for file1], + * // f2: [stats for file2], + * // f3: [stats for file3] + * // } + * }); + */ + function mapValues(obj, iteratee, callback) { + return mapValuesLimit$1(obj, Infinity, iteratee, callback) + } + + /** + * The same as [`mapValues`]{@link module:Collections.mapValues} but runs only a single async operation at a time. + * + * @name mapValuesSeries + * @static + * @memberOf module:Collections + * @method + * @see [async.mapValues]{@link module:Collections.mapValues} + * @category Collection + * @param {Object} obj - A collection to iterate over. + * @param {AsyncFunction} iteratee - A function to apply to each value and key + * in `coll`. + * The iteratee should complete with the transformed value as its result. + * Invoked with (value, key, callback). + * @param {Function} [callback] - A callback which is called when all `iteratee` + * functions have finished, or an error occurs. `result` is a new object consisting + * of each key from `obj`, with each transformed value on the right-hand side. + * Invoked with (err, result). + * @returns {Promise} a promise, if no callback is passed + */ + function mapValuesSeries(obj, iteratee, callback) { + return mapValuesLimit$1(obj, 1, iteratee, callback) + } + + /** + * Caches the results of an async function. When creating a hash to store + * function results against, the callback is omitted from the hash and an + * optional hash function can be used. 
+ * + * **Note: if the async function errs, the result will not be cached and + * subsequent calls will call the wrapped function.** + * + * If no hash function is specified, the first argument is used as a hash key, + * which may work reasonably if it is a string or a data type that converts to a + * distinct string. Note that objects and arrays will not behave reasonably. + * Neither will cases where the other arguments are significant. In such cases, + * specify your own hash function. + * + * The cache of results is exposed as the `memo` property of the function + * returned by `memoize`. + * + * @name memoize + * @static + * @memberOf module:Utils + * @method + * @category Util + * @param {AsyncFunction} fn - The async function to proxy and cache results from. + * @param {Function} hasher - An optional function for generating a custom hash + * for storing results. It has all the arguments applied to it apart from the + * callback, and must be synchronous. + * @returns {AsyncFunction} a memoized version of `fn` + * @example + * + * var slow_fn = function(name, callback) { + * // do something + * callback(null, result); + * }; + * var fn = async.memoize(slow_fn); + * + * // fn can now be used as if it were slow_fn + * fn('some name', function() { + * // callback + * }); + */ + function memoize(fn, hasher = v => v) { + var memo = Object.create(null); + var queues = Object.create(null); + var _fn = wrapAsync(fn); + var memoized = initialParams((args, callback) => { + var key = hasher(...args); + if (key in memo) { + setImmediate$1(() => callback(null, ...memo[key])); + } else if (key in queues) { + queues[key].push(callback); + } else { + queues[key] = [callback]; + _fn(...args, (err, ...resultArgs) => { + // #1465 don't memoize if an error occurred + if (!err) { + memo[key] = resultArgs; + } + var q = queues[key]; + delete queues[key]; + for (var i = 0, l = q.length; i < l; i++) { + q[i](err, ...resultArgs); + } + }); + } + }); + memoized.memo = memo; + 
memoized.unmemoized = fn; + return memoized; + } + + /** + * Calls `callback` on a later loop around the event loop. In Node.js this just + * calls `process.nextTick`. In the browser it will use `setImmediate` if + * available, otherwise `setTimeout(callback, 0)`, which means other higher + * priority events may precede the execution of `callback`. + * + * This is used internally for browser-compatibility purposes. + * + * @name nextTick + * @static + * @memberOf module:Utils + * @method + * @see [async.setImmediate]{@link module:Utils.setImmediate} + * @category Util + * @param {Function} callback - The function to call on a later loop around + * the event loop. Invoked with (args...). + * @param {...*} args... - any number of additional arguments to pass to the + * callback on the next tick. + * @example + * + * var call_order = []; + * async.nextTick(function() { + * call_order.push('two'); + * // call_order now equals ['one','two'] + * }); + * call_order.push('one'); + * + * async.setImmediate(function (a, b, c) { + * // a, b, and c equal 1, 2, and 3 + * }, 1, 2, 3); + */ + var _defer$1; + + if (hasNextTick) { + _defer$1 = process.nextTick; + } else if (hasSetImmediate) { + _defer$1 = setImmediate; + } else { + _defer$1 = fallback; + } + + var nextTick = wrap(_defer$1); + + var parallel = awaitify((eachfn, tasks, callback) => { + var results = isArrayLike(tasks) ? [] : {}; + + eachfn(tasks, (task, key, taskCb) => { + wrapAsync(task)((err, ...result) => { + if (result.length < 2) { + [result] = result; + } + results[key] = result; + taskCb(err); + }); + }, err => callback(err, results)); + }, 3); + + /** + * Run the `tasks` collection of functions in parallel, without waiting until + * the previous function has completed. If any of the functions pass an error to + * its callback, the main `callback` is immediately called with the value of the + * error. Once the `tasks` have completed, the results are passed to the final + * `callback` as an array. 
+ * + * **Note:** `parallel` is about kicking-off I/O tasks in parallel, not about + * parallel execution of code. If your tasks do not use any timers or perform + * any I/O, they will actually be executed in series. Any synchronous setup + * sections for each task will happen one after the other. JavaScript remains + * single-threaded. + * + * **Hint:** Use [`reflect`]{@link module:Utils.reflect} to continue the + * execution of other tasks when a task fails. + * + * It is also possible to use an object instead of an array. Each property will + * be run as a function and the results will be passed to the final `callback` + * as an object instead of an array. This can be a more readable way of handling + * results from {@link async.parallel}. + * + * @name parallel + * @static + * @memberOf module:ControlFlow + * @method + * @category Control Flow + * @param {Array|Iterable|AsyncIterable|Object} tasks - A collection of + * [async functions]{@link AsyncFunction} to run. + * Each async function can complete with any number of optional `result` values. + * @param {Function} [callback] - An optional callback to run once all the + * functions have completed successfully. This function gets a results array + * (or object) containing all the result arguments passed to the task callbacks. + * Invoked with (err, results). + * @returns {Promise} a promise, if a callback is not passed + * + * @example + * async.parallel([ + * function(callback) { + * setTimeout(function() { + * callback(null, 'one'); + * }, 200); + * }, + * function(callback) { + * setTimeout(function() { + * callback(null, 'two'); + * }, 100); + * } + * ], + * // optional callback + * function(err, results) { + * // the results array will equal ['one','two'] even though + * // the second function had a shorter timeout. 
+ * }); + * + * // an example using an object instead of an array + * async.parallel({ + * one: function(callback) { + * setTimeout(function() { + * callback(null, 1); + * }, 200); + * }, + * two: function(callback) { + * setTimeout(function() { + * callback(null, 2); + * }, 100); + * } + * }, function(err, results) { + * // results is now equals to: {one: 1, two: 2} + * }); + */ + function parallel$1(tasks, callback) { + return parallel(eachOf$1, tasks, callback); + } + + /** + * The same as [`parallel`]{@link module:ControlFlow.parallel} but runs a maximum of `limit` async operations at a + * time. + * + * @name parallelLimit + * @static + * @memberOf module:ControlFlow + * @method + * @see [async.parallel]{@link module:ControlFlow.parallel} + * @category Control Flow + * @param {Array|Iterable|AsyncIterable|Object} tasks - A collection of + * [async functions]{@link AsyncFunction} to run. + * Each async function can complete with any number of optional `result` values. + * @param {number} limit - The maximum number of async operations at a time. + * @param {Function} [callback] - An optional callback to run once all the + * functions have completed successfully. This function gets a results array + * (or object) containing all the result arguments passed to the task callbacks. + * Invoked with (err, results). + * @returns {Promise} a promise, if a callback is not passed + */ + function parallelLimit(tasks, limit, callback) { + return parallel(eachOfLimit(limit), tasks, callback); + } + + /** + * A queue of tasks for the worker function to complete. + * @typedef {Iterable} QueueObject + * @memberOf module:ControlFlow + * @property {Function} length - a function returning the number of items + * waiting to be processed. Invoke with `queue.length()`. + * @property {boolean} started - a boolean indicating whether or not any + * items have been pushed and processed by the queue. 
+ * @property {Function} running - a function returning the number of items
+ * currently being processed. Invoke with `queue.running()`.
+ * @property {Function} workersList - a function returning the array of items
+ * currently being processed. Invoke with `queue.workersList()`.
+ * @property {Function} idle - a function returning false if there are items
+ * waiting or being processed, or true if not. Invoke with `queue.idle()`.
+ * @property {number} concurrency - an integer for determining how many `worker`
+ * functions should be run in parallel. This property can be changed after a
+ * `queue` is created to alter the concurrency on-the-fly.
+ * @property {number} payload - an integer that specifies how many items are
+ * passed to the worker function at a time. only applies if this is a
+ * [cargo]{@link module:ControlFlow.cargo} object
+ * @property {AsyncFunction} push - add a new task to the `queue`. Calls `callback`
+ * once the `worker` has finished processing the task. Instead of a single task,
+ * a `tasks` array can be submitted. The respective callback is used for every
+ * task in the list. Invoke with `queue.push(task, [callback])`,
+ * @property {AsyncFunction} unshift - add a new task to the front of the `queue`.
+ * Invoke with `queue.unshift(task, [callback])`.
+ * @property {AsyncFunction} pushAsync - the same as `q.push`, except this returns
+ * a promise that rejects if an error occurs.
+ * @property {AsyncFunction} unshiftAsync - the same as `q.unshift`, except this returns
+ * a promise that rejects if an error occurs.
+ * @property {Function} remove - remove items from the queue that match a test
+ * function. The test function will be passed an object with a `data` property,
+ * and a `priority` property, if this is a
+ * [priorityQueue]{@link module:ControlFlow.priorityQueue} object.
+ * Invoked with `queue.remove(testFn)`, where `testFn` is of the form
+ * `function ({data, priority}) {}` and returns a Boolean.
+ * @property {Function} saturated - a function that sets a callback that is + * called when the number of running workers hits the `concurrency` limit, and + * further tasks will be queued. If the callback is omitted, `q.saturated()` + * returns a promise for the next occurrence. + * @property {Function} unsaturated - a function that sets a callback that is + * called when the number of running workers is less than the `concurrency` & + * `buffer` limits, and further tasks will not be queued. If the callback is + * omitted, `q.unsaturated()` returns a promise for the next occurrence. + * @property {number} buffer - A minimum threshold buffer in order to say that + * the `queue` is `unsaturated`. + * @property {Function} empty - a function that sets a callback that is called + * when the last item from the `queue` is given to a `worker`. If the callback + * is omitted, `q.empty()` returns a promise for the next occurrence. + * @property {Function} drain - a function that sets a callback that is called + * when the last item from the `queue` has returned from the `worker`. If the + * callback is omitted, `q.drain()` returns a promise for the next occurrence. + * @property {Function} error - a function that sets a callback that is called + * when a task errors. Has the signature `function(error, task)`. If the + * callback is omitted, `error()` returns a promise that rejects on the next + * error. + * @property {boolean} paused - a boolean for determining whether the queue is + * in a paused state. + * @property {Function} pause - a function that pauses the processing of tasks + * until `resume()` is called. Invoke with `queue.pause()`. + * @property {Function} resume - a function that resumes the processing of + * queued tasks when the queue is paused. Invoke with `queue.resume()`. + * @property {Function} kill - a function that removes the `drain` callback and + * empties remaining tasks from the queue forcing it to go idle. 
No more tasks
+ * should be pushed to the queue after calling this function. Invoke with `queue.kill()`.
+ *
+ * @example
+ * const q = async.queue(worker, 2)
+ * q.push(item1)
+ * q.push(item2)
+ * q.push(item3)
+ * // queues are iterable, spread into an array to inspect
+ * const items = [...q] // [item1, item2, item3]
+ * // or use for of
+ * for (let item of q) {
+ *     console.log(item)
+ * }
+ *
+ * q.drain(() => {
+ *     console.log('all done')
+ * })
+ * // or
+ * await q.drain()
+ */
+
+ /**
+ * Creates a `queue` object with the specified `concurrency`. Tasks added to the
+ * `queue` are processed in parallel (up to the `concurrency` limit). If all
+ * `worker`s are in progress, the task is queued until one becomes available.
+ * Once a `worker` completes a `task`, that `task`'s callback is called.
+ *
+ * @name queue
+ * @static
+ * @memberOf module:ControlFlow
+ * @method
+ * @category Control Flow
+ * @param {AsyncFunction} worker - An async function for processing a queued task.
+ * If you want to handle errors from an individual task, pass a callback to
+ * `q.push()`. Invoked with (task, callback).
+ * @param {number} [concurrency=1] - An `integer` for determining how many
+ * `worker` functions should be run in parallel. If omitted, the concurrency
+ * defaults to `1`. If the concurrency is `0`, an error is thrown.
+ * @returns {module:ControlFlow.QueueObject} A queue object to manage the tasks. Callbacks can be
+ * attached as certain properties to listen for specific events during the
+ * lifecycle of the queue.
+ * @example + * + * // create a queue object with concurrency 2 + * var q = async.queue(function(task, callback) { + * console.log('hello ' + task.name); + * callback(); + * }, 2); + * + * // assign a callback + * q.drain(function() { + * console.log('all items have been processed'); + * }); + * // or await the end + * await q.drain() + * + * // assign an error callback + * q.error(function(err, task) { + * console.error('task experienced an error'); + * }); + * + * // add some items to the queue + * q.push({name: 'foo'}, function(err) { + * console.log('finished processing foo'); + * }); + * // callback is optional + * q.push({name: 'bar'}); + * + * // add some items to the queue (batch-wise) + * q.push([{name: 'baz'},{name: 'bay'},{name: 'bax'}], function(err) { + * console.log('finished processing item'); + * }); + * + * // add some items to the front of the queue + * q.unshift({name: 'bar'}, function (err) { + * console.log('finished processing bar'); + * }); + */ + function queue$1 (worker, concurrency) { + var _worker = wrapAsync(worker); + return queue((items, cb) => { + _worker(items[0], cb); + }, concurrency, 1); + } + + // Binary min-heap implementation used for priority queue. + // Implementation is stable, i.e. 
push time is considered for equal priorities + class Heap { + constructor() { + this.heap = []; + this.pushCount = Number.MIN_SAFE_INTEGER; + } + + get length() { + return this.heap.length; + } + + empty () { + this.heap = []; + return this; + } + + percUp(index) { + let p; + + while (index > 0 && smaller(this.heap[index], this.heap[p=parent(index)])) { + let t = this.heap[index]; + this.heap[index] = this.heap[p]; + this.heap[p] = t; + + index = p; + } + } + + percDown(index) { + let l; + + while ((l=leftChi(index)) < this.heap.length) { + if (l+1 < this.heap.length && smaller(this.heap[l+1], this.heap[l])) { + l = l+1; + } + + if (smaller(this.heap[index], this.heap[l])) { + break; + } + + let t = this.heap[index]; + this.heap[index] = this.heap[l]; + this.heap[l] = t; + + index = l; + } + } + + push(node) { + node.pushCount = ++this.pushCount; + this.heap.push(node); + this.percUp(this.heap.length-1); + } + + unshift(node) { + return this.heap.push(node); + } + + shift() { + let [top] = this.heap; + + this.heap[0] = this.heap[this.heap.length-1]; + this.heap.pop(); + this.percDown(0); + + return top; + } + + toArray() { + return [...this]; + } + + *[Symbol.iterator] () { + for (let i = 0; i < this.heap.length; i++) { + yield this.heap[i].data; + } + } + + remove (testFn) { + let j = 0; + for (let i = 0; i < this.heap.length; i++) { + if (!testFn(this.heap[i])) { + this.heap[j] = this.heap[i]; + j++; + } + } + + this.heap.splice(j); + + for (let i = parent(this.heap.length-1); i >= 0; i--) { + this.percDown(i); + } + + return this; + } + } + + function leftChi(i) { + return (i<<1)+1; + } + + function parent(i) { + return ((i+1)>>1)-1; + } + + function smaller(x, y) { + if (x.priority !== y.priority) { + return x.priority < y.priority; + } + else { + return x.pushCount < y.pushCount; + } + } + + /** + * The same as [async.queue]{@link module:ControlFlow.queue} only tasks are assigned a priority and + * completed in ascending priority order. 
+ * + * @name priorityQueue + * @static + * @memberOf module:ControlFlow + * @method + * @see [async.queue]{@link module:ControlFlow.queue} + * @category Control Flow + * @param {AsyncFunction} worker - An async function for processing a queued task. + * If you want to handle errors from an individual task, pass a callback to + * `q.push()`. + * Invoked with (task, callback). + * @param {number} concurrency - An `integer` for determining how many `worker` + * functions should be run in parallel. If omitted, the concurrency defaults to + * `1`. If the concurrency is `0`, an error is thrown. + * @returns {module:ControlFlow.QueueObject} A priorityQueue object to manage the tasks. There are two + * differences between `queue` and `priorityQueue` objects: + * * `push(task, priority, [callback])` - `priority` should be a number. If an + * array of `tasks` is given, all tasks will be assigned the same priority. + * * The `unshift` method was removed. + */ + function priorityQueue(worker, concurrency) { + // Start with a normal queue + var q = queue$1(worker, concurrency); + + q._tasks = new Heap(); + + // Override push to accept second parameter representing priority + q.push = function(data, priority = 0, callback = () => {}) { + if (typeof callback !== 'function') { + throw new Error('task callback must be a function'); + } + q.started = true; + if (!Array.isArray(data)) { + data = [data]; + } + if (data.length === 0 && q.idle()) { + // call drain immediately if there are no tasks + return setImmediate$1(() => q.drain()); + } + + for (var i = 0, l = data.length; i < l; i++) { + var item = { + data: data[i], + priority, + callback + }; + + q._tasks.push(item); + } + + setImmediate$1(q.process); + }; + + // Remove unshift function + delete q.unshift; + + return q; + } + + /** + * Runs the `tasks` array of functions in parallel, without waiting until the + * previous function has completed. 
Once any of the `tasks` complete or pass an + * error to its callback, the main `callback` is immediately called. It's + * equivalent to `Promise.race()`. + * + * @name race + * @static + * @memberOf module:ControlFlow + * @method + * @category Control Flow + * @param {Array} tasks - An array containing [async functions]{@link AsyncFunction} + * to run. Each function can complete with an optional `result` value. + * @param {Function} callback - A callback to run once any of the functions have + * completed. This function gets an error or result from the first function that + * completed. Invoked with (err, result). + * @returns undefined + * @example + * + * async.race([ + * function(callback) { + * setTimeout(function() { + * callback(null, 'one'); + * }, 200); + * }, + * function(callback) { + * setTimeout(function() { + * callback(null, 'two'); + * }, 100); + * } + * ], + * // main callback + * function(err, result) { + * // the result will be equal to 'two' as it finishes earlier + * }); + */ + function race(tasks, callback) { + callback = once(callback); + if (!Array.isArray(tasks)) return callback(new TypeError('First argument to race must be an array of functions')); + if (!tasks.length) return callback(); + for (var i = 0, l = tasks.length; i < l; i++) { + wrapAsync(tasks[i])(callback); + } + } + + var race$1 = awaitify(race, 2); + + /** + * Same as [`reduce`]{@link module:Collections.reduce}, only operates on `array` in reverse order. + * + * @name reduceRight + * @static + * @memberOf module:Collections + * @method + * @see [async.reduce]{@link module:Collections.reduce} + * @alias foldr + * @category Collection + * @param {Array} array - A collection to iterate over. + * @param {*} memo - The initial state of the reduction. + * @param {AsyncFunction} iteratee - A function applied to each item in the + * array to produce the next step in the reduction. + * The `iteratee` should complete with the next state of the reduction. 
+ * If the iteratee completes with an error, the reduction is stopped and the
+ * main `callback` is immediately called with the error.
+ * Invoked with (memo, item, callback).
+ * @param {Function} [callback] - A callback which is called after all the
+ * `iteratee` functions have finished. Result is the reduced value. Invoked with
+ * (err, result).
+ * @returns {Promise} a promise, if no callback is passed
+ */
+ function reduceRight (array, memo, iteratee, callback) {
+ var reversed = [...array].reverse();
+ return reduce$1(reversed, memo, iteratee, callback);
+ }
+
+ /**
+ * Wraps the async function in another function that always completes with a
+ * result object, even when it errors.
+ *
+ * The result object has either the property `error` or `value`.
+ *
+ * @name reflect
+ * @static
+ * @memberOf module:Utils
+ * @method
+ * @category Util
+ * @param {AsyncFunction} fn - The async function you want to wrap
+ * @returns {Function} - A function that always passes null to its callback as
+ * the error. The second argument to the callback will be an `object` with
+ * either an `error` or a `value` property.
+ * @example
+ *
+ * async.parallel([
+ * async.reflect(function(callback) {
+ * // do some stuff ...
+ * callback(null, 'one');
+ * }),
+ * async.reflect(function(callback) {
+ * // do some more stuff but error ...
+ * callback('bad stuff happened');
+ * }),
+ * async.reflect(function(callback) {
+ * // do some more stuff ...
+ * callback(null, 'two'); + * }) + * ], + * // optional callback + * function(err, results) { + * // values + * // results[0].value = 'one' + * // results[1].error = 'bad stuff happened' + * // results[2].value = 'two' + * }); + */ + function reflect(fn) { + var _fn = wrapAsync(fn); + return initialParams(function reflectOn(args, reflectCallback) { + args.push((error, ...cbArgs) => { + let retVal = {}; + if (error) { + retVal.error = error; + } + if (cbArgs.length > 0){ + var value = cbArgs; + if (cbArgs.length <= 1) { + [value] = cbArgs; + } + retVal.value = value; + } + reflectCallback(null, retVal); + }); + + return _fn.apply(this, args); + }); + } + + /** + * A helper function that wraps an array or an object of functions with `reflect`. + * + * @name reflectAll + * @static + * @memberOf module:Utils + * @method + * @see [async.reflect]{@link module:Utils.reflect} + * @category Util + * @param {Array|Object|Iterable} tasks - The collection of + * [async functions]{@link AsyncFunction} to wrap in `async.reflect`. + * @returns {Array} Returns an array of async functions, each wrapped in + * `async.reflect` + * @example + * + * let tasks = [ + * function(callback) { + * setTimeout(function() { + * callback(null, 'one'); + * }, 200); + * }, + * function(callback) { + * // do some more stuff but error ... 
+ * callback(new Error('bad stuff happened')); + * }, + * function(callback) { + * setTimeout(function() { + * callback(null, 'two'); + * }, 100); + * } + * ]; + * + * async.parallel(async.reflectAll(tasks), + * // optional callback + * function(err, results) { + * // values + * // results[0].value = 'one' + * // results[1].error = Error('bad stuff happened') + * // results[2].value = 'two' + * }); + * + * // an example using an object instead of an array + * let tasks = { + * one: function(callback) { + * setTimeout(function() { + * callback(null, 'one'); + * }, 200); + * }, + * two: function(callback) { + * callback('two'); + * }, + * three: function(callback) { + * setTimeout(function() { + * callback(null, 'three'); + * }, 100); + * } + * }; + * + * async.parallel(async.reflectAll(tasks), + * // optional callback + * function(err, results) { + * // values + * // results.one.value = 'one' + * // results.two.error = 'two' + * // results.three.value = 'three' + * }); + */ + function reflectAll(tasks) { + var results; + if (Array.isArray(tasks)) { + results = tasks.map(reflect); + } else { + results = {}; + Object.keys(tasks).forEach(key => { + results[key] = reflect.call(this, tasks[key]); + }); + } + return results; + } + + function reject(eachfn, arr, _iteratee, callback) { + const iteratee = wrapAsync(_iteratee); + return _filter(eachfn, arr, (value, cb) => { + iteratee(value, (err, v) => { + cb(err, !v); + }); + }, callback); + } + + /** + * The opposite of [`filter`]{@link module:Collections.filter}. Removes values that pass an `async` truth test. + * + * @name reject + * @static + * @memberOf module:Collections + * @method + * @see [async.filter]{@link module:Collections.filter} + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {Function} iteratee - An async truth test to apply to each item in + * `coll`. + * The should complete with a boolean value as its `result`. 
+ * Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called after all the + * `iteratee` functions have finished. Invoked with (err, results). + * @returns {Promise} a promise, if no callback is passed + * @example + * + * async.reject(['file1','file2','file3'], function(filePath, callback) { + * fs.access(filePath, function(err) { + * callback(null, !err) + * }); + * }, function(err, results) { + * // results now equals an array of missing files + * createFiles(results); + * }); + */ + function reject$1 (coll, iteratee, callback) { + return reject(eachOf$1, coll, iteratee, callback) + } + var reject$2 = awaitify(reject$1, 3); + + /** + * The same as [`reject`]{@link module:Collections.reject} but runs a maximum of `limit` async operations at a + * time. + * + * @name rejectLimit + * @static + * @memberOf module:Collections + * @method + * @see [async.reject]{@link module:Collections.reject} + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {number} limit - The maximum number of async operations at a time. + * @param {Function} iteratee - An async truth test to apply to each item in + * `coll`. + * The should complete with a boolean value as its `result`. + * Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called after all the + * `iteratee` functions have finished. Invoked with (err, results). + * @returns {Promise} a promise, if no callback is passed + */ + function rejectLimit (coll, limit, iteratee, callback) { + return reject(eachOfLimit(limit), coll, iteratee, callback) + } + var rejectLimit$1 = awaitify(rejectLimit, 4); + + /** + * The same as [`reject`]{@link module:Collections.reject} but runs only a single async operation at a time. 
+ * + * @name rejectSeries + * @static + * @memberOf module:Collections + * @method + * @see [async.reject]{@link module:Collections.reject} + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {Function} iteratee - An async truth test to apply to each item in + * `coll`. + * The should complete with a boolean value as its `result`. + * Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called after all the + * `iteratee` functions have finished. Invoked with (err, results). + * @returns {Promise} a promise, if no callback is passed + */ + function rejectSeries (coll, iteratee, callback) { + return reject(eachOfSeries$1, coll, iteratee, callback) + } + var rejectSeries$1 = awaitify(rejectSeries, 3); + + function constant$1(value) { + return function () { + return value; + } + } + + /** + * Attempts to get a successful response from `task` no more than `times` times + * before returning an error. If the task is successful, the `callback` will be + * passed the result of the successful task. If all attempts fail, the callback + * will be passed the error and result (if any) of the final attempt. + * + * @name retry + * @static + * @memberOf module:ControlFlow + * @method + * @category Control Flow + * @see [async.retryable]{@link module:ControlFlow.retryable} + * @param {Object|number} [opts = {times: 5, interval: 0}| 5] - Can be either an + * object with `times` and `interval` or a number. + * * `times` - The number of attempts to make before giving up. The default + * is `5`. + * * `interval` - The time to wait between retries, in milliseconds. The + * default is `0`. The interval may also be specified as a function of the + * retry count (see example). + * * `errorFilter` - An optional synchronous function that is invoked on + * erroneous result. 
If it returns `true` the retry attempts will continue; + * if the function returns `false` the retry flow is aborted with the current + * attempt's error and result being returned to the final callback. + * Invoked with (err). + * * If `opts` is a number, the number specifies the number of times to retry, + * with the default interval of `0`. + * @param {AsyncFunction} task - An async function to retry. + * Invoked with (callback). + * @param {Function} [callback] - An optional callback which is called when the + * task has succeeded, or after the final failed attempt. It receives the `err` + * and `result` arguments of the last attempt at completing the `task`. Invoked + * with (err, results). + * @returns {Promise} a promise if no callback provided + * + * @example + * + * // The `retry` function can be used as a stand-alone control flow by passing + * // a callback, as shown below: + * + * // try calling apiMethod 3 times + * async.retry(3, apiMethod, function(err, result) { + * // do something with the result + * }); + * + * // try calling apiMethod 3 times, waiting 200 ms between each retry + * async.retry({times: 3, interval: 200}, apiMethod, function(err, result) { + * // do something with the result + * }); + * + * // try calling apiMethod 10 times with exponential backoff + * // (i.e. intervals of 100, 200, 400, 800, 1600, ... 
milliseconds) + * async.retry({ + * times: 10, + * interval: function(retryCount) { + * return 50 * Math.pow(2, retryCount); + * } + * }, apiMethod, function(err, result) { + * // do something with the result + * }); + * + * // try calling apiMethod the default 5 times no delay between each retry + * async.retry(apiMethod, function(err, result) { + * // do something with the result + * }); + * + * // try calling apiMethod only when error condition satisfies, all other + * // errors will abort the retry control flow and return to final callback + * async.retry({ + * errorFilter: function(err) { + * return err.message === 'Temporary error'; // only retry on a specific error + * } + * }, apiMethod, function(err, result) { + * // do something with the result + * }); + * + * // to retry individual methods that are not as reliable within other + * // control flow functions, use the `retryable` wrapper: + * async.auto({ + * users: api.getUsers.bind(api), + * payments: async.retryable(3, api.getPayments.bind(api)) + * }, function(err, results) { + * // do something with the results + * }); + * + */ + const DEFAULT_TIMES = 5; + const DEFAULT_INTERVAL = 0; + + function retry(opts, task, callback) { + var options = { + times: DEFAULT_TIMES, + intervalFunc: constant$1(DEFAULT_INTERVAL) + }; + + if (arguments.length < 3 && typeof opts === 'function') { + callback = task || promiseCallback(); + task = opts; + } else { + parseTimes(options, opts); + callback = callback || promiseCallback(); + } + + if (typeof task !== 'function') { + throw new Error("Invalid arguments for async.retry"); + } + + var _task = wrapAsync(task); + + var attempt = 1; + function retryAttempt() { + _task((err, ...args) => { + if (err === false) return + if (err && attempt++ < options.times && + (typeof options.errorFilter != 'function' || + options.errorFilter(err))) { + setTimeout(retryAttempt, options.intervalFunc(attempt - 1)); + } else { + callback(err, ...args); + } + }); + } + + retryAttempt(); + 
return callback[PROMISE_SYMBOL] + } + + function parseTimes(acc, t) { + if (typeof t === 'object') { + acc.times = +t.times || DEFAULT_TIMES; + + acc.intervalFunc = typeof t.interval === 'function' ? + t.interval : + constant$1(+t.interval || DEFAULT_INTERVAL); + + acc.errorFilter = t.errorFilter; + } else if (typeof t === 'number' || typeof t === 'string') { + acc.times = +t || DEFAULT_TIMES; + } else { + throw new Error("Invalid arguments for async.retry"); + } + } + + /** + * A close relative of [`retry`]{@link module:ControlFlow.retry}. This method + * wraps a task and makes it retryable, rather than immediately calling it + * with retries. + * + * @name retryable + * @static + * @memberOf module:ControlFlow + * @method + * @see [async.retry]{@link module:ControlFlow.retry} + * @category Control Flow + * @param {Object|number} [opts = {times: 5, interval: 0}| 5] - optional + * options, exactly the same as from `retry`, except for a `opts.arity` that + * is the arity of the `task` function, defaulting to `task.length` + * @param {AsyncFunction} task - the asynchronous function to wrap. + * This function will be passed any arguments passed to the returned wrapper. + * Invoked with (...args, callback). + * @returns {AsyncFunction} The wrapped function, which when invoked, will + * retry on an error, based on the parameters specified in `opts`. + * This function will accept the same parameters as `task`. 
+ * @example + * + * async.auto({ + * dep1: async.retryable(3, getFromFlakyService), + * process: ["dep1", async.retryable(3, function (results, cb) { + * maybeProcessData(results.dep1, cb); + * })] + * }, callback); + */ + function retryable (opts, task) { + if (!task) { + task = opts; + opts = null; + } + let arity = (opts && opts.arity) || task.length; + if (isAsync(task)) { + arity += 1; + } + var _task = wrapAsync(task); + return initialParams((args, callback) => { + if (args.length < arity - 1 || callback == null) { + args.push(callback); + callback = promiseCallback(); + } + function taskFn(cb) { + _task(...args, cb); + } + + if (opts) retry(opts, taskFn, callback); + else retry(taskFn, callback); + + return callback[PROMISE_SYMBOL] + }); + } + + /** + * Run the functions in the `tasks` collection in series, each one running once + * the previous function has completed. If any functions in the series pass an + * error to its callback, no more functions are run, and `callback` is + * immediately called with the value of the error. Otherwise, `callback` + * receives an array of results when `tasks` have completed. + * + * It is also possible to use an object instead of an array. Each property will + * be run as a function, and the results will be passed to the final `callback` + * as an object instead of an array. This can be a more readable way of handling + * results from {@link async.series}. + * + * **Note** that while many implementations preserve the order of object + * properties, the [ECMAScript Language Specification](http://www.ecma-international.org/ecma-262/5.1/#sec-8.6) + * explicitly states that + * + * > The mechanics and order of enumerating the properties is not specified. + * + * So if you rely on the order in which your series of functions are executed, + * and want this to work on all platforms, consider using an array. 
+ * + * @name series + * @static + * @memberOf module:ControlFlow + * @method + * @category Control Flow + * @param {Array|Iterable|AsyncIterable|Object} tasks - A collection containing + * [async functions]{@link AsyncFunction} to run in series. + * Each function can complete with any number of optional `result` values. + * @param {Function} [callback] - An optional callback to run once all the + * functions have completed. This function gets a results array (or object) + * containing all the result arguments passed to the `task` callbacks. Invoked + * with (err, result). + * @return {Promise} a promise, if no callback is passed + * @example + * async.series([ + * function(callback) { + * // do some stuff ... + * callback(null, 'one'); + * }, + * function(callback) { + * // do some more stuff ... + * callback(null, 'two'); + * } + * ], + * // optional callback + * function(err, results) { + * // results is now equal to ['one', 'two'] + * }); + * + * async.series({ + * one: function(callback) { + * setTimeout(function() { + * callback(null, 1); + * }, 200); + * }, + * two: function(callback){ + * setTimeout(function() { + * callback(null, 2); + * }, 100); + * } + * }, function(err, results) { + * // results is now equal to: {one: 1, two: 2} + * }); + */ + function series(tasks, callback) { + return parallel(eachOfSeries$1, tasks, callback); + } + + /** + * Returns `true` if at least one element in the `coll` satisfies an async test. + * If any iteratee call returns `true`, the main `callback` is immediately + * called. + * + * @name some + * @static + * @memberOf module:Collections + * @method + * @alias any + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - An async truth test to apply to each item + * in the collections in parallel. + * The iteratee should complete with a boolean `result` value. + * Invoked with (item, callback). 
+ * @param {Function} [callback] - A callback which is called as soon as any + * iteratee returns `true`, or after all the iteratee functions have finished. + * Result will be either `true` or `false` depending on the values of the async + * tests. Invoked with (err, result). + * @returns {Promise} a promise, if no callback provided + * @example + * + * async.some(['file1','file2','file3'], function(filePath, callback) { + * fs.access(filePath, function(err) { + * callback(null, !err) + * }); + * }, function(err, result) { + * // if result is true then at least one of the files exists + * }); + */ + function some(coll, iteratee, callback) { + return _createTester(Boolean, res => res)(eachOf$1, coll, iteratee, callback) + } + var some$1 = awaitify(some, 3); + + /** + * The same as [`some`]{@link module:Collections.some} but runs a maximum of `limit` async operations at a time. + * + * @name someLimit + * @static + * @memberOf module:Collections + * @method + * @see [async.some]{@link module:Collections.some} + * @alias anyLimit + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {number} limit - The maximum number of async operations at a time. + * @param {AsyncFunction} iteratee - An async truth test to apply to each item + * in the collections in parallel. + * The iteratee should complete with a boolean `result` value. + * Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called as soon as any + * iteratee returns `true`, or after all the iteratee functions have finished. + * Result will be either `true` or `false` depending on the values of the async + * tests. Invoked with (err, result). 
+ * @returns {Promise} a promise, if no callback provided + */ + function someLimit(coll, limit, iteratee, callback) { + return _createTester(Boolean, res => res)(eachOfLimit(limit), coll, iteratee, callback) + } + var someLimit$1 = awaitify(someLimit, 4); + + /** + * The same as [`some`]{@link module:Collections.some} but runs only a single async operation at a time. + * + * @name someSeries + * @static + * @memberOf module:Collections + * @method + * @see [async.some]{@link module:Collections.some} + * @alias anySeries + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - An async truth test to apply to each item + * in the collections in series. + * The iteratee should complete with a boolean `result` value. + * Invoked with (item, callback). + * @param {Function} [callback] - A callback which is called as soon as any + * iteratee returns `true`, or after all the iteratee functions have finished. + * Result will be either `true` or `false` depending on the values of the async + * tests. Invoked with (err, result). + * @returns {Promise} a promise, if no callback provided + */ + function someSeries(coll, iteratee, callback) { + return _createTester(Boolean, res => res)(eachOfSeries$1, coll, iteratee, callback) + } + var someSeries$1 = awaitify(someSeries, 3); + + /** + * Sorts a list by the results of running each `coll` value through an async + * `iteratee`. + * + * @name sortBy + * @static + * @memberOf module:Collections + * @method + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {AsyncFunction} iteratee - An async function to apply to each item in + * `coll`. + * The iteratee should complete with a value to use as the sort criteria as + * its `result`. + * Invoked with (item, callback). 
+ * @param {Function} callback - A callback which is called after all the + * `iteratee` functions have finished, or an error occurs. Results is the items + * from the original `coll` sorted by the values returned by the `iteratee` + * calls. Invoked with (err, results). + * @returns {Promise} a promise, if no callback passed + * @example + * + * async.sortBy(['file1','file2','file3'], function(file, callback) { + * fs.stat(file, function(err, stats) { + * callback(err, stats.mtime); + * }); + * }, function(err, results) { + * // results is now the original array of files sorted by + * // modified date + * }); + * + * // By modifying the callback parameter the + * // sorting order can be influenced: + * + * // ascending order + * async.sortBy([1,9,3,5], function(x, callback) { + * callback(null, x); + * }, function(err,result) { + * // result callback + * }); + * + * // descending order + * async.sortBy([1,9,3,5], function(x, callback) { + * callback(null, x*-1); //<- x*-1 instead of x, turns the order around + * }, function(err,result) { + * // result callback + * }); + */ + function sortBy (coll, iteratee, callback) { + var _iteratee = wrapAsync(iteratee); + return map$1(coll, (x, iterCb) => { + _iteratee(x, (err, criteria) => { + if (err) return iterCb(err); + iterCb(err, {value: x, criteria}); + }); + }, (err, results) => { + if (err) return callback(err); + callback(null, results.sort(comparator).map(v => v.value)); + }); + + function comparator(left, right) { + var a = left.criteria, b = right.criteria; + return a < b ? -1 : a > b ? 1 : 0; + } + } + var sortBy$1 = awaitify(sortBy, 3); + + /** + * Sets a time limit on an asynchronous function. If the function does not call + * its callback within the specified milliseconds, it will be called with a + * timeout error. The code property for the error object will be `'ETIMEDOUT'`. 
+ * + * @name timeout + * @static + * @memberOf module:Utils + * @method + * @category Util + * @param {AsyncFunction} asyncFn - The async function to limit in time. + * @param {number} milliseconds - The specified time limit. + * @param {*} [info] - Any variable you want attached (`string`, `object`, etc) + * to timeout Error for more information.. + * @returns {AsyncFunction} Returns a wrapped function that can be used with any + * of the control flow functions. + * Invoke this function with the same parameters as you would `asyncFunc`. + * @example + * + * function myFunction(foo, callback) { + * doAsyncTask(foo, function(err, data) { + * // handle errors + * if (err) return callback(err); + * + * // do some stuff ... + * + * // return processed data + * return callback(null, data); + * }); + * } + * + * var wrapped = async.timeout(myFunction, 1000); + * + * // call `wrapped` as you would `myFunction` + * wrapped({ bar: 'bar' }, function(err, data) { + * // if `myFunction` takes < 1000 ms to execute, `err` + * // and `data` will have their expected values + * + * // else `err` will be an Error with the code 'ETIMEDOUT' + * }); + */ + function timeout(asyncFn, milliseconds, info) { + var fn = wrapAsync(asyncFn); + + return initialParams((args, callback) => { + var timedOut = false; + var timer; + + function timeoutCallback() { + var name = asyncFn.name || 'anonymous'; + var error = new Error('Callback function "' + name + '" timed out.'); + error.code = 'ETIMEDOUT'; + if (info) { + error.info = info; + } + timedOut = true; + callback(error); + } + + args.push((...cbArgs) => { + if (!timedOut) { + callback(...cbArgs); + clearTimeout(timer); + } + }); + + // setup timer and call original function + timer = setTimeout(timeoutCallback, milliseconds); + fn(...args); + }); + } + + function range(size) { + var result = Array(size); + while (size--) { + result[size] = size; + } + return result; + } + + /** + * The same as [times]{@link module:ControlFlow.times} but runs 
a maximum of `limit` async operations at a + * time. + * + * @name timesLimit + * @static + * @memberOf module:ControlFlow + * @method + * @see [async.times]{@link module:ControlFlow.times} + * @category Control Flow + * @param {number} count - The number of times to run the function. + * @param {number} limit - The maximum number of async operations at a time. + * @param {AsyncFunction} iteratee - The async function to call `n` times. + * Invoked with the iteration index and a callback: (n, next). + * @param {Function} callback - see [async.map]{@link module:Collections.map}. + * @returns {Promise} a promise, if no callback is provided + */ + function timesLimit(count, limit, iteratee, callback) { + var _iteratee = wrapAsync(iteratee); + return mapLimit$1(range(count), limit, _iteratee, callback); + } + + /** + * Calls the `iteratee` function `n` times, and accumulates results in the same + * manner you would use with [map]{@link module:Collections.map}. + * + * @name times + * @static + * @memberOf module:ControlFlow + * @method + * @see [async.map]{@link module:Collections.map} + * @category Control Flow + * @param {number} n - The number of times to run the function. + * @param {AsyncFunction} iteratee - The async function to call `n` times. + * Invoked with the iteration index and a callback: (n, next). + * @param {Function} callback - see {@link module:Collections.map}. 
+ * @returns {Promise} a promise, if no callback is provided + * @example + * + * // Pretend this is some complicated async factory + * var createUser = function(id, callback) { + * callback(null, { + * id: 'user' + id + * }); + * }; + * + * // generate 5 users + * async.times(5, function(n, next) { + * createUser(n, function(err, user) { + * next(err, user); + * }); + * }, function(err, users) { + * // we should now have 5 users + * }); + */ + function times (n, iteratee, callback) { + return timesLimit(n, Infinity, iteratee, callback) + } + + /** + * The same as [times]{@link module:ControlFlow.times} but runs only a single async operation at a time. + * + * @name timesSeries + * @static + * @memberOf module:ControlFlow + * @method + * @see [async.times]{@link module:ControlFlow.times} + * @category Control Flow + * @param {number} n - The number of times to run the function. + * @param {AsyncFunction} iteratee - The async function to call `n` times. + * Invoked with the iteration index and a callback: (n, next). + * @param {Function} callback - see {@link module:Collections.map}. + * @returns {Promise} a promise, if no callback is provided + */ + function timesSeries (n, iteratee, callback) { + return timesLimit(n, 1, iteratee, callback) + } + + /** + * A relative of `reduce`. Takes an Object or Array, and iterates over each + * element in parallel, each step potentially mutating an `accumulator` value. + * The type of the accumulator defaults to the type of collection passed in. + * + * @name transform + * @static + * @memberOf module:Collections + * @method + * @category Collection + * @param {Array|Iterable|AsyncIterable|Object} coll - A collection to iterate over. + * @param {*} [accumulator] - The initial state of the transform. If omitted, + * it will default to an empty Object or Array, depending on the type of `coll` + * @param {AsyncFunction} iteratee - A function applied to each item in the + * collection that potentially modifies the accumulator. 
+ * Invoked with (accumulator, item, key, callback). + * @param {Function} [callback] - A callback which is called after all the + * `iteratee` functions have finished. Result is the transformed accumulator. + * Invoked with (err, result). + * @returns {Promise} a promise, if no callback provided + * @example + * + * async.transform([1,2,3], function(acc, item, index, callback) { + * // pointless async: + * process.nextTick(function() { + * acc[index] = item * 2 + * callback(null) + * }); + * }, function(err, result) { + * // result is now equal to [2, 4, 6] + * }); + * + * @example + * + * async.transform({a: 1, b: 2, c: 3}, function (obj, val, key, callback) { + * setImmediate(function () { + * obj[key] = val * 2; + * callback(); + * }) + * }, function (err, result) { + * // result is equal to {a: 2, b: 4, c: 6} + * }) + */ + function transform (coll, accumulator, iteratee, callback) { + if (arguments.length <= 3 && typeof accumulator === 'function') { + callback = iteratee; + iteratee = accumulator; + accumulator = Array.isArray(coll) ? [] : {}; + } + callback = once(callback || promiseCallback()); + var _iteratee = wrapAsync(iteratee); + + eachOf$1(coll, (v, k, cb) => { + _iteratee(accumulator, v, k, cb); + }, err => callback(err, accumulator)); + return callback[PROMISE_SYMBOL] + } + + /** + * It runs each task in series but stops whenever any of the functions were + * successful. If one of the tasks were successful, the `callback` will be + * passed the result of the successful task. If all tasks fail, the callback + * will be passed the error and result (if any) of the final attempt. + * + * @name tryEach + * @static + * @memberOf module:ControlFlow + * @method + * @category Control Flow + * @param {Array|Iterable|AsyncIterable|Object} tasks - A collection containing functions to + * run, each function is passed a `callback(err, result)` it must call on + * completion with an error `err` (which can be `null`) and an optional `result` + * value. 
+ * @param {Function} [callback] - An optional callback which is called when one + * of the tasks has succeeded, or all have failed. It receives the `err` and + * `result` arguments of the last attempt at completing the `task`. Invoked with + * (err, results). + * @returns {Promise} a promise, if no callback is passed + * @example + * async.tryEach([ + * function getDataFromFirstWebsite(callback) { + * // Try getting the data from the first website + * callback(err, data); + * }, + * function getDataFromSecondWebsite(callback) { + * // First website failed, + * // Try getting the data from the backup website + * callback(err, data); + * } + * ], + * // optional callback + * function(err, results) { + * Now do something with the data. + * }); + * + */ + function tryEach(tasks, callback) { + var error = null; + var result; + return eachSeries$1(tasks, (task, taskCb) => { + wrapAsync(task)((err, ...args) => { + if (err === false) return taskCb(err); + + if (args.length < 2) { + [result] = args; + } else { + result = args; + } + error = err; + taskCb(err ? null : {}); + }); + }, () => callback(error, result)); + } + + var tryEach$1 = awaitify(tryEach); + + /** + * Undoes a [memoize]{@link module:Utils.memoize}d function, reverting it to the original, + * unmemoized form. Handy for testing. + * + * @name unmemoize + * @static + * @memberOf module:Utils + * @method + * @see [async.memoize]{@link module:Utils.memoize} + * @category Util + * @param {AsyncFunction} fn - the memoized function + * @returns {AsyncFunction} a function that calls the original unmemoized function + */ + function unmemoize(fn) { + return (...args) => { + return (fn.unmemoized || fn)(...args); + }; + } + + /** + * Repeatedly call `iteratee`, while `test` returns `true`. Calls `callback` when + * stopped, or an error occurs. 
+ * + * @name whilst + * @static + * @memberOf module:ControlFlow + * @method + * @category Control Flow + * @param {AsyncFunction} test - asynchronous truth test to perform before each + * execution of `iteratee`. Invoked with (). + * @param {AsyncFunction} iteratee - An async function which is called each time + * `test` passes. Invoked with (callback). + * @param {Function} [callback] - A callback which is called after the test + * function has failed and repeated execution of `iteratee` has stopped. `callback` + * will be passed an error and any arguments passed to the final `iteratee`'s + * callback. Invoked with (err, [results]); + * @returns {Promise} a promise, if no callback is passed + * @example + * + * var count = 0; + * async.whilst( + * function test(cb) { cb(null, count < 5); }, + * function iter(callback) { + * count++; + * setTimeout(function() { + * callback(null, count); + * }, 1000); + * }, + * function (err, n) { + * // 5 seconds have passed, n = 5 + * } + * ); + */ + function whilst(test, iteratee, callback) { + callback = onlyOnce(callback); + var _fn = wrapAsync(iteratee); + var _test = wrapAsync(test); + var results = []; + + function next(err, ...rest) { + if (err) return callback(err); + results = rest; + if (err === false) return; + _test(check); + } + + function check(err, truth) { + if (err) return callback(err); + if (err === false) return; + if (!truth) return callback(null, ...results); + _fn(next); + } + + return _test(check); + } + var whilst$1 = awaitify(whilst, 3); + + /** + * Repeatedly call `iteratee` until `test` returns `true`. Calls `callback` when + * stopped, or an error occurs. `callback` will be passed an error and any + * arguments passed to the final `iteratee`'s callback. + * + * The inverse of [whilst]{@link module:ControlFlow.whilst}. 
+ * + * @name until + * @static + * @memberOf module:ControlFlow + * @method + * @see [async.whilst]{@link module:ControlFlow.whilst} + * @category Control Flow + * @param {AsyncFunction} test - asynchronous truth test to perform before each + * execution of `iteratee`. Invoked with (callback). + * @param {AsyncFunction} iteratee - An async function which is called each time + * `test` fails. Invoked with (callback). + * @param {Function} [callback] - A callback which is called after the test + * function has passed and repeated execution of `iteratee` has stopped. `callback` + * will be passed an error and any arguments passed to the final `iteratee`'s + * callback. Invoked with (err, [results]); + * @returns {Promise} a promise, if a callback is not passed + * + * @example + * const results = [] + * let finished = false + * async.until(function test(page, cb) { + * cb(null, finished) + * }, function iter(next) { + * fetchPage(url, (err, body) => { + * if (err) return next(err) + * results = results.concat(body.objects) + * finished = !!body.next + * next(err) + * }) + * }, function done (err) { + * // all pages have been fetched + * }) + */ + function until(test, iteratee, callback) { + const _test = wrapAsync(test); + return whilst$1((cb) => _test((err, truth) => cb (err, !truth)), iteratee, callback); + } + + /** + * Runs the `tasks` array of functions in series, each passing their results to + * the next in the array. However, if any of the `tasks` pass an error to their + * own callback, the next function is not executed, and the main `callback` is + * immediately called with the error. + * + * @name waterfall + * @static + * @memberOf module:ControlFlow + * @method + * @category Control Flow + * @param {Array} tasks - An array of [async functions]{@link AsyncFunction} + * to run. + * Each function should complete with any number of `result` values. + * The `result` values will be passed as arguments, in order, to the next task. 
+ * @param {Function} [callback] - An optional callback to run once all the + * functions have completed. This will be passed the results of the last task's + * callback. Invoked with (err, [results]). + * @returns undefined + * @example + * + * async.waterfall([ + * function(callback) { + * callback(null, 'one', 'two'); + * }, + * function(arg1, arg2, callback) { + * // arg1 now equals 'one' and arg2 now equals 'two' + * callback(null, 'three'); + * }, + * function(arg1, callback) { + * // arg1 now equals 'three' + * callback(null, 'done'); + * } + * ], function (err, result) { + * // result now equals 'done' + * }); + * + * // Or, with named functions: + * async.waterfall([ + * myFirstFunction, + * mySecondFunction, + * myLastFunction, + * ], function (err, result) { + * // result now equals 'done' + * }); + * function myFirstFunction(callback) { + * callback(null, 'one', 'two'); + * } + * function mySecondFunction(arg1, arg2, callback) { + * // arg1 now equals 'one' and arg2 now equals 'two' + * callback(null, 'three'); + * } + * function myLastFunction(arg1, callback) { + * // arg1 now equals 'three' + * callback(null, 'done'); + * } + */ + function waterfall (tasks, callback) { + callback = once(callback); + if (!Array.isArray(tasks)) return callback(new Error('First argument to waterfall must be an array of functions')); + if (!tasks.length) return callback(); + var taskIndex = 0; + + function nextTask(args) { + var task = wrapAsync(tasks[taskIndex++]); + task(...args, onlyOnce(next)); + } + + function next(err, ...args) { + if (err === false) return + if (err || taskIndex === tasks.length) { + return callback(err, ...args); + } + nextTask(args); + } + + nextTask([]); + } + + var waterfall$1 = awaitify(waterfall); + + /** + * An "async function" in the context of Async is an asynchronous function with + * a variable number of parameters, with the final parameter being a callback. 
+ * (`function (arg1, arg2, ..., callback) {}`) + * The final callback is of the form `callback(err, results...)`, which must be + * called once the function is completed. The callback should be called with a + * Error as its first argument to signal that an error occurred. + * Otherwise, if no error occurred, it should be called with `null` as the first + * argument, and any additional `result` arguments that may apply, to signal + * successful completion. + * The callback must be called exactly once, ideally on a later tick of the + * JavaScript event loop. + * + * This type of function is also referred to as a "Node-style async function", + * or a "continuation passing-style function" (CPS). Most of the methods of this + * library are themselves CPS/Node-style async functions, or functions that + * return CPS/Node-style async functions. + * + * Wherever we accept a Node-style async function, we also directly accept an + * [ES2017 `async` function]{@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/async_function}. + * In this case, the `async` function will not be passed a final callback + * argument, and any thrown error will be used as the `err` argument of the + * implicit callback, and the return value will be used as the `result` value. + * (i.e. a `rejected` of the returned Promise becomes the `err` callback + * argument, and a `resolved` value becomes the `result`.) + * + * Note, due to JavaScript limitations, we can only detect native `async` + * functions and not transpilied implementations. + * Your environment must have `async`/`await` support for this to work. + * (e.g. Node > v7.6, or a recent version of a modern browser). + * If you are using `async` functions through a transpiler (e.g. Babel), you + * must still wrap the function with [asyncify]{@link module:Utils.asyncify}, + * because the `async function` will be compiled to an ordinary function that + * returns a promise. 
+ * + * @typedef {Function} AsyncFunction + * @static + */ + + var index = { + apply, + applyEach: applyEach$1, + applyEachSeries, + asyncify, + auto, + autoInject, + cargo, + cargoQueue: cargo$1, + compose, + concat: concat$1, + concatLimit: concatLimit$1, + concatSeries: concatSeries$1, + constant, + detect: detect$1, + detectLimit: detectLimit$1, + detectSeries: detectSeries$1, + dir, + doUntil, + doWhilst: doWhilst$1, + each, + eachLimit: eachLimit$2, + eachOf: eachOf$1, + eachOfLimit: eachOfLimit$2, + eachOfSeries: eachOfSeries$1, + eachSeries: eachSeries$1, + ensureAsync, + every: every$1, + everyLimit: everyLimit$1, + everySeries: everySeries$1, + filter: filter$1, + filterLimit: filterLimit$1, + filterSeries: filterSeries$1, + forever: forever$1, + groupBy, + groupByLimit: groupByLimit$1, + groupBySeries, + log, + map: map$1, + mapLimit: mapLimit$1, + mapSeries: mapSeries$1, + mapValues, + mapValuesLimit: mapValuesLimit$1, + mapValuesSeries, + memoize, + nextTick, + parallel: parallel$1, + parallelLimit, + priorityQueue, + queue: queue$1, + race: race$1, + reduce: reduce$1, + reduceRight, + reflect, + reflectAll, + reject: reject$2, + rejectLimit: rejectLimit$1, + rejectSeries: rejectSeries$1, + retry, + retryable, + seq, + series, + setImmediate: setImmediate$1, + some: some$1, + someLimit: someLimit$1, + someSeries: someSeries$1, + sortBy: sortBy$1, + timeout, + times, + timesLimit, + timesSeries, + transform, + tryEach: tryEach$1, + unmemoize, + until, + waterfall: waterfall$1, + whilst: whilst$1, + + // aliases + all: every$1, + allLimit: everyLimit$1, + allSeries: everySeries$1, + any: some$1, + anyLimit: someLimit$1, + anySeries: someSeries$1, + find: detect$1, + findLimit: detectLimit$1, + findSeries: detectSeries$1, + flatMap: concat$1, + flatMapLimit: concatLimit$1, + flatMapSeries: concatSeries$1, + forEach: each, + forEachSeries: eachSeries$1, + forEachLimit: eachLimit$2, + forEachOf: eachOf$1, + forEachOfSeries: eachOfSeries$1, + forEachOfLimit: 
eachOfLimit$2, + inject: reduce$1, + foldl: reduce$1, + foldr: reduceRight, + select: filter$1, + selectLimit: filterLimit$1, + selectSeries: filterSeries$1, + wrapSync: asyncify, + during: whilst$1, + doDuring: doWhilst$1 + }; + + exports.default = index; + exports.apply = apply; + exports.applyEach = applyEach$1; + exports.applyEachSeries = applyEachSeries; + exports.asyncify = asyncify; + exports.auto = auto; + exports.autoInject = autoInject; + exports.cargo = cargo; + exports.cargoQueue = cargo$1; + exports.compose = compose; + exports.concat = concat$1; + exports.concatLimit = concatLimit$1; + exports.concatSeries = concatSeries$1; + exports.constant = constant; + exports.detect = detect$1; + exports.detectLimit = detectLimit$1; + exports.detectSeries = detectSeries$1; + exports.dir = dir; + exports.doUntil = doUntil; + exports.doWhilst = doWhilst$1; + exports.each = each; + exports.eachLimit = eachLimit$2; + exports.eachOf = eachOf$1; + exports.eachOfLimit = eachOfLimit$2; + exports.eachOfSeries = eachOfSeries$1; + exports.eachSeries = eachSeries$1; + exports.ensureAsync = ensureAsync; + exports.every = every$1; + exports.everyLimit = everyLimit$1; + exports.everySeries = everySeries$1; + exports.filter = filter$1; + exports.filterLimit = filterLimit$1; + exports.filterSeries = filterSeries$1; + exports.forever = forever$1; + exports.groupBy = groupBy; + exports.groupByLimit = groupByLimit$1; + exports.groupBySeries = groupBySeries; + exports.log = log; + exports.map = map$1; + exports.mapLimit = mapLimit$1; + exports.mapSeries = mapSeries$1; + exports.mapValues = mapValues; + exports.mapValuesLimit = mapValuesLimit$1; + exports.mapValuesSeries = mapValuesSeries; + exports.memoize = memoize; + exports.nextTick = nextTick; + exports.parallel = parallel$1; + exports.parallelLimit = parallelLimit; + exports.priorityQueue = priorityQueue; + exports.queue = queue$1; + exports.race = race$1; + exports.reduce = reduce$1; + exports.reduceRight = reduceRight; + 
exports.reflect = reflect; + exports.reflectAll = reflectAll; + exports.reject = reject$2; + exports.rejectLimit = rejectLimit$1; + exports.rejectSeries = rejectSeries$1; + exports.retry = retry; + exports.retryable = retryable; + exports.seq = seq; + exports.series = series; + exports.setImmediate = setImmediate$1; + exports.some = some$1; + exports.someLimit = someLimit$1; + exports.someSeries = someSeries$1; + exports.sortBy = sortBy$1; + exports.timeout = timeout; + exports.times = times; + exports.timesLimit = timesLimit; + exports.timesSeries = timesSeries; + exports.transform = transform; + exports.tryEach = tryEach$1; + exports.unmemoize = unmemoize; + exports.until = until; + exports.waterfall = waterfall$1; + exports.whilst = whilst$1; + exports.all = every$1; + exports.allLimit = everyLimit$1; + exports.allSeries = everySeries$1; + exports.any = some$1; + exports.anyLimit = someLimit$1; + exports.anySeries = someSeries$1; + exports.find = detect$1; + exports.findLimit = detectLimit$1; + exports.findSeries = detectSeries$1; + exports.flatMap = concat$1; + exports.flatMapLimit = concatLimit$1; + exports.flatMapSeries = concatSeries$1; + exports.forEach = each; + exports.forEachSeries = eachSeries$1; + exports.forEachLimit = eachLimit$2; + exports.forEachOf = eachOf$1; + exports.forEachOfSeries = eachOfSeries$1; + exports.forEachOfLimit = eachOfLimit$2; + exports.inject = reduce$1; + exports.foldl = reduce$1; + exports.foldr = reduceRight; + exports.select = filter$1; + exports.selectLimit = filterLimit$1; + exports.selectSeries = filterSeries$1; + exports.wrapSync = asyncify; + exports.during = whilst$1; + exports.doDuring = doWhilst$1; + + Object.defineProperty(exports, '__esModule', { value: true }); + +}))); + + +/***/ }), + +/***/ 64698: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +module.exports = +{ + parallel : __nccwpck_require__(81149), + serial : __nccwpck_require__(69665), + serialOrdered : __nccwpck_require__(40630) 
+}; + + +/***/ }), + +/***/ 64561: +/***/ ((module) => { + +// API +module.exports = abort; + +/** + * Aborts leftover active jobs + * + * @param {object} state - current state object + */ +function abort(state) +{ + Object.keys(state.jobs).forEach(clean.bind(state)); + + // reset leftover jobs + state.jobs = {}; +} + +/** + * Cleans up leftover job by invoking abort function for the provided job id + * + * @this state + * @param {string|number} key - job id to abort + */ +function clean(key) +{ + if (typeof this.jobs[key] == 'function') + { + this.jobs[key](); + } +} + + +/***/ }), + +/***/ 61107: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +var defer = __nccwpck_require__(90135); + +// API +module.exports = async; + +/** + * Runs provided callback asynchronously + * even if callback itself is not + * + * @param {function} callback - callback to invoke + * @returns {function} - augmented callback + */ +function async(callback) +{ + var isAsync = false; + + // check if async happened + defer(function() { isAsync = true; }); + + return function async_callback(err, result) + { + if (isAsync) + { + callback(err, result); + } + else + { + defer(function nextTick_callback() + { + callback(err, result); + }); + } + }; +} + + +/***/ }), + +/***/ 90135: +/***/ ((module) => { + +module.exports = defer; + +/** + * Runs provided function on next iteration of the event loop + * + * @param {function} fn - function to run + */ +function defer(fn) +{ + var nextTick = typeof setImmediate == 'function' + ? setImmediate + : ( + typeof process == 'object' && typeof process.nextTick == 'function' + ? 
process.nextTick + : null + ); + + if (nextTick) + { + nextTick(fn); + } + else + { + setTimeout(fn, 0); + } +} + + +/***/ }), + +/***/ 90580: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +var async = __nccwpck_require__(61107) + , abort = __nccwpck_require__(64561) + ; + +// API +module.exports = iterate; + +/** + * Iterates over each job object + * + * @param {array|object} list - array or object (named list) to iterate over + * @param {function} iterator - iterator to run + * @param {object} state - current job status + * @param {function} callback - invoked when all elements processed + */ +function iterate(list, iterator, state, callback) +{ + // store current index + var key = state['keyedList'] ? state['keyedList'][state.index] : state.index; + + state.jobs[key] = runJob(iterator, key, list[key], function(error, output) + { + // don't repeat yourself + // skip secondary callbacks + if (!(key in state.jobs)) + { + return; + } + + // clean up jobs + delete state.jobs[key]; + + if (error) + { + // don't process rest of the results + // stop still active jobs + // and reset the list + abort(state); + } + else + { + state.results[key] = output; + } + + // return salvaged results + callback(error, state.results); + }); +} + +/** + * Runs iterator over provided job element + * + * @param {function} iterator - iterator to invoke + * @param {string|number} key - key/index of the element in the list of jobs + * @param {mixed} item - job description + * @param {function} callback - invoked after iterator is done with the job + * @returns {function|mixed} - job abort function or something else + */ +function runJob(iterator, key, item, callback) +{ + var aborter; + + // allow shortcut if iterator expects only two arguments + if (iterator.length == 2) + { + aborter = iterator(item, async(callback)); + } + // otherwise go with full three arguments + else + { + aborter = iterator(item, key, async(callback)); + } + + return aborter; +} + + +/***/ 
}), + +/***/ 86629: +/***/ ((module) => { + +// API +module.exports = state; + +/** + * Creates initial state object + * for iteration over list + * + * @param {array|object} list - list to iterate over + * @param {function|null} sortMethod - function to use for keys sort, + * or `null` to keep them as is + * @returns {object} - initial state object + */ +function state(list, sortMethod) +{ + var isNamedList = !Array.isArray(list) + , initState = + { + index : 0, + keyedList: isNamedList || sortMethod ? Object.keys(list) : null, + jobs : {}, + results : isNamedList ? {} : [], + size : isNamedList ? Object.keys(list).length : list.length + } + ; + + if (sortMethod) + { + // sort array keys based on it's values + // sort object's keys just on own merit + initState.keyedList.sort(isNamedList ? sortMethod : function(a, b) + { + return sortMethod(list[a], list[b]); + }); + } + + return initState; +} + + +/***/ }), + +/***/ 48428: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +var abort = __nccwpck_require__(64561) + , async = __nccwpck_require__(61107) + ; + +// API +module.exports = terminator; + +/** + * Terminates jobs in the attached state context + * + * @this AsyncKitState# + * @param {function} callback - final callback to invoke after termination + */ +function terminator(callback) +{ + if (!Object.keys(this.jobs).length) + { + return; + } + + // fast forward iteration index + this.index = this.size; + + // abort jobs + abort(this); + + // send back results we have so far + async(callback)(null, this.results); +} + + +/***/ }), + +/***/ 81149: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +var iterate = __nccwpck_require__(90580) + , initState = __nccwpck_require__(86629) + , terminator = __nccwpck_require__(48428) + ; + +// Public API +module.exports = parallel; + +/** + * Runs iterator over provided array elements in parallel + * + * @param {array|object} list - array or object (named list) to iterate over + * 
@param {function} iterator - iterator to run + * @param {function} callback - invoked when all elements processed + * @returns {function} - jobs terminator + */ +function parallel(list, iterator, callback) +{ + var state = initState(list); + + while (state.index < (state['keyedList'] || list).length) + { + iterate(list, iterator, state, function(error, result) + { + if (error) + { + callback(error, result); + return; + } + + // looks like it's the last one + if (Object.keys(state.jobs).length === 0) + { + callback(null, state.results); + return; + } + }); + + state.index++; + } + + return terminator.bind(state, callback); +} + + +/***/ }), + +/***/ 69665: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +var serialOrdered = __nccwpck_require__(40630); + +// Public API +module.exports = serial; + +/** + * Runs iterator over provided array elements in series + * + * @param {array|object} list - array or object (named list) to iterate over + * @param {function} iterator - iterator to run + * @param {function} callback - invoked when all elements processed + * @returns {function} - jobs terminator + */ +function serial(list, iterator, callback) +{ + return serialOrdered(list, iterator, null, callback); +} + + +/***/ }), + +/***/ 40630: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +var iterate = __nccwpck_require__(90580) + , initState = __nccwpck_require__(86629) + , terminator = __nccwpck_require__(48428) + ; + +// Public API +module.exports = serialOrdered; +// sorting helpers +module.exports.ascending = ascending; +module.exports.descending = descending; + +/** + * Runs iterator over provided sorted array elements in series + * + * @param {array|object} list - array or object (named list) to iterate over + * @param {function} iterator - iterator to run + * @param {function} sortMethod - custom sort function + * @param {function} callback - invoked when all elements processed + * @returns {function} - jobs terminator + 
*/ +function serialOrdered(list, iterator, sortMethod, callback) +{ + var state = initState(list, sortMethod); + + iterate(list, iterator, state, function iteratorHandler(error, result) + { + if (error) + { + callback(error, result); + return; + } + + state.index++; + + // are we there yet? + if (state.index < (state['keyedList'] || list).length) + { + iterate(list, iterator, state, iteratorHandler); + return; + } + + // done here + callback(null, state.results); + }); + + return terminator.bind(state, callback); +} + +/* + * -- Sort methods + */ + +/** + * sort helper to sort array elements in ascending order + * + * @param {mixed} a - an item to compare + * @param {mixed} b - an item to compare + * @returns {number} - comparison result + */ +function ascending(a, b) +{ + return a < b ? -1 : a > b ? 1 : 0; +} + +/** + * sort helper to sort array elements in descending order + * + * @param {mixed} a - an item to compare + * @param {mixed} b - an item to compare + * @returns {number} - comparison result + */ +function descending(a, b) +{ + return -1 * ascending(a, b); +} + + +/***/ }), + +/***/ 3711: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + + +/*! + * Copyright 2010 LearnBoost + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Module dependencies. + */ + +var crypto = __nccwpck_require__(6113) + , parse = (__nccwpck_require__(57310).parse) + ; + +/** + * Valid keys. 
+ */ + +var keys = + [ 'acl' + , 'location' + , 'logging' + , 'notification' + , 'partNumber' + , 'policy' + , 'requestPayment' + , 'torrent' + , 'uploadId' + , 'uploads' + , 'versionId' + , 'versioning' + , 'versions' + , 'website' + ] + +/** + * Return an "Authorization" header value with the given `options` + * in the form of "AWS :" + * + * @param {Object} options + * @return {String} + * @api private + */ + +function authorization (options) { + return 'AWS ' + options.key + ':' + sign(options) +} + +module.exports = authorization +module.exports.authorization = authorization + +/** + * Simple HMAC-SHA1 Wrapper + * + * @param {Object} options + * @return {String} + * @api private + */ + +function hmacSha1 (options) { + return crypto.createHmac('sha1', options.secret).update(options.message).digest('base64') +} + +module.exports.hmacSha1 = hmacSha1 + +/** + * Create a base64 sha1 HMAC for `options`. + * + * @param {Object} options + * @return {String} + * @api private + */ + +function sign (options) { + options.message = stringToSign(options) + return hmacSha1(options) +} +module.exports.sign = sign + +/** + * Create a base64 sha1 HMAC for `options`. + * + * Specifically to be used with S3 presigned URLs + * + * @param {Object} options + * @return {String} + * @api private + */ + +function signQuery (options) { + options.message = queryStringToSign(options) + return hmacSha1(options) +} +module.exports.signQuery= signQuery + +/** + * Return a string for sign() with the given `options`. + * + * Spec: + * + * \n + * \n + * \n + * \n + * [headers\n] + * + * + * @param {Object} options + * @return {String} + * @api private + */ + +function stringToSign (options) { + var headers = options.amazonHeaders || '' + if (headers) headers += '\n' + var r = + [ options.verb + , options.md5 + , options.contentType + , options.date ? 
options.date.toUTCString() : '' + , headers + options.resource + ] + return r.join('\n') +} +module.exports.stringToSign = stringToSign + +/** + * Return a string for sign() with the given `options`, but is meant exclusively + * for S3 presigned URLs + * + * Spec: + * + * \n + * + * + * @param {Object} options + * @return {String} + * @api private + */ + +function queryStringToSign (options){ + return 'GET\n\n\n' + options.date + '\n' + options.resource +} +module.exports.queryStringToSign = queryStringToSign + +/** + * Perform the following: + * + * - ignore non-amazon headers + * - lowercase fields + * - sort lexicographically + * - trim whitespace between ":" + * - join with newline + * + * @param {Object} headers + * @return {String} + * @api private + */ + +function canonicalizeHeaders (headers) { + var buf = [] + , fields = Object.keys(headers) + ; + for (var i = 0, len = fields.length; i < len; ++i) { + var field = fields[i] + , val = headers[field] + , field = field.toLowerCase() + ; + if (0 !== field.indexOf('x-amz')) continue + buf.push(field + ':' + val) + } + return buf.sort().join('\n') +} +module.exports.canonicalizeHeaders = canonicalizeHeaders + +/** + * Perform the following: + * + * - ignore non sub-resources + * - sort lexicographically + * + * @param {String} resource + * @return {String} + * @api private + */ + +function canonicalizeResource (resource) { + var url = parse(resource, true) + , path = url.pathname + , buf = [] + ; + + Object.keys(url.query).forEach(function(key){ + if (!~keys.indexOf(key)) return + var val = '' == url.query[key] ? '' : '=' + encodeURIComponent(url.query[key]) + buf.push(key + val) + }) + + return path + (buf.length ? '?' 
+ buf.sort().join('&') : '') +} +module.exports.canonicalizeResource = canonicalizeResource + + +/***/ }), + +/***/ 47526: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +var aws4 = exports, + url = __nccwpck_require__(57310), + querystring = __nccwpck_require__(63477), + crypto = __nccwpck_require__(6113), + lru = __nccwpck_require__(28785), + credentialsCache = lru(1000) + +// http://docs.amazonwebservices.com/general/latest/gr/signature-version-4.html + +function hmac(key, string, encoding) { + return crypto.createHmac('sha256', key).update(string, 'utf8').digest(encoding) +} + +function hash(string, encoding) { + return crypto.createHash('sha256').update(string, 'utf8').digest(encoding) +} + +// This function assumes the string has already been percent encoded +function encodeRfc3986(urlEncodedString) { + return urlEncodedString.replace(/[!'()*]/g, function(c) { + return '%' + c.charCodeAt(0).toString(16).toUpperCase() + }) +} + +function encodeRfc3986Full(str) { + return encodeRfc3986(encodeURIComponent(str)) +} + +// request: { path | body, [host], [method], [headers], [service], [region] } +// credentials: { accessKeyId, secretAccessKey, [sessionToken] } +function RequestSigner(request, credentials) { + + if (typeof request === 'string') request = url.parse(request) + + var headers = request.headers = (request.headers || {}), + hostParts = (!this.service || !this.region) && this.matchHost(request.hostname || request.host || headers.Host || headers.host) + + this.request = request + this.credentials = credentials || this.defaultCredentials() + + this.service = request.service || hostParts[0] || '' + this.region = request.region || hostParts[1] || 'us-east-1' + + // SES uses a different domain from the service name + if (this.service === 'email') this.service = 'ses' + + if (!request.method && request.body) + request.method = 'POST' + + if (!headers.Host && !headers.host) { + headers.Host = request.hostname || request.host || 
this.createHost() + + // If a port is specified explicitly, use it as is + if (request.port) + headers.Host += ':' + request.port + } + if (!request.hostname && !request.host) + request.hostname = headers.Host || headers.host + + this.isCodeCommitGit = this.service === 'codecommit' && request.method === 'GIT' +} + +RequestSigner.prototype.matchHost = function(host) { + var match = (host || '').match(/([^\.]+)\.(?:([^\.]*)\.)?amazonaws\.com(\.cn)?$/) + var hostParts = (match || []).slice(1, 3) + + // ES's hostParts are sometimes the other way round, if the value that is expected + // to be region equals ‘es’ switch them back + // e.g. search-cluster-name-aaaa00aaaa0aaa0aaaaaaa0aaa.us-east-1.es.amazonaws.com + if (hostParts[1] === 'es') + hostParts = hostParts.reverse() + + if (hostParts[1] == 's3') { + hostParts[0] = 's3' + hostParts[1] = 'us-east-1' + } else { + for (var i = 0; i < 2; i++) { + if (/^s3-/.test(hostParts[i])) { + hostParts[1] = hostParts[i].slice(3) + hostParts[0] = 's3' + break + } + } + } + + return hostParts +} + +// http://docs.aws.amazon.com/general/latest/gr/rande.html +RequestSigner.prototype.isSingleRegion = function() { + // Special case for S3 and SimpleDB in us-east-1 + if (['s3', 'sdb'].indexOf(this.service) >= 0 && this.region === 'us-east-1') return true + + return ['cloudfront', 'ls', 'route53', 'iam', 'importexport', 'sts'] + .indexOf(this.service) >= 0 +} + +RequestSigner.prototype.createHost = function() { + var region = this.isSingleRegion() ? '' : '.' + this.region, + subdomain = this.service === 'ses' ? 
'email' : this.service + return subdomain + region + '.amazonaws.com' +} + +RequestSigner.prototype.prepareRequest = function() { + this.parsePath() + + var request = this.request, headers = request.headers, query + + if (request.signQuery) { + + this.parsedPath.query = query = this.parsedPath.query || {} + + if (this.credentials.sessionToken) + query['X-Amz-Security-Token'] = this.credentials.sessionToken + + if (this.service === 's3' && !query['X-Amz-Expires']) + query['X-Amz-Expires'] = 86400 + + if (query['X-Amz-Date']) + this.datetime = query['X-Amz-Date'] + else + query['X-Amz-Date'] = this.getDateTime() + + query['X-Amz-Algorithm'] = 'AWS4-HMAC-SHA256' + query['X-Amz-Credential'] = this.credentials.accessKeyId + '/' + this.credentialString() + query['X-Amz-SignedHeaders'] = this.signedHeaders() + + } else { + + if (!request.doNotModifyHeaders && !this.isCodeCommitGit) { + if (request.body && !headers['Content-Type'] && !headers['content-type']) + headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=utf-8' + + if (request.body && !headers['Content-Length'] && !headers['content-length']) + headers['Content-Length'] = Buffer.byteLength(request.body) + + if (this.credentials.sessionToken && !headers['X-Amz-Security-Token'] && !headers['x-amz-security-token']) + headers['X-Amz-Security-Token'] = this.credentials.sessionToken + + if (this.service === 's3' && !headers['X-Amz-Content-Sha256'] && !headers['x-amz-content-sha256']) + headers['X-Amz-Content-Sha256'] = hash(this.request.body || '', 'hex') + + if (headers['X-Amz-Date'] || headers['x-amz-date']) + this.datetime = headers['X-Amz-Date'] || headers['x-amz-date'] + else + headers['X-Amz-Date'] = this.getDateTime() + } + + delete headers.Authorization + delete headers.authorization + } +} + +RequestSigner.prototype.sign = function() { + if (!this.parsedPath) this.prepareRequest() + + if (this.request.signQuery) { + this.parsedPath.query['X-Amz-Signature'] = this.signature() + } else { + 
this.request.headers.Authorization = this.authHeader() + } + + this.request.path = this.formatPath() + + return this.request +} + +RequestSigner.prototype.getDateTime = function() { + if (!this.datetime) { + var headers = this.request.headers, + date = new Date(headers.Date || headers.date || new Date) + + this.datetime = date.toISOString().replace(/[:\-]|\.\d{3}/g, '') + + // Remove the trailing 'Z' on the timestamp string for CodeCommit git access + if (this.isCodeCommitGit) this.datetime = this.datetime.slice(0, -1) + } + return this.datetime +} + +RequestSigner.prototype.getDate = function() { + return this.getDateTime().substr(0, 8) +} + +RequestSigner.prototype.authHeader = function() { + return [ + 'AWS4-HMAC-SHA256 Credential=' + this.credentials.accessKeyId + '/' + this.credentialString(), + 'SignedHeaders=' + this.signedHeaders(), + 'Signature=' + this.signature(), + ].join(', ') +} + +RequestSigner.prototype.signature = function() { + var date = this.getDate(), + cacheKey = [this.credentials.secretAccessKey, date, this.region, this.service].join(), + kDate, kRegion, kService, kCredentials = credentialsCache.get(cacheKey) + if (!kCredentials) { + kDate = hmac('AWS4' + this.credentials.secretAccessKey, date) + kRegion = hmac(kDate, this.region) + kService = hmac(kRegion, this.service) + kCredentials = hmac(kService, 'aws4_request') + credentialsCache.set(cacheKey, kCredentials) + } + return hmac(kCredentials, this.stringToSign(), 'hex') +} + +RequestSigner.prototype.stringToSign = function() { + return [ + 'AWS4-HMAC-SHA256', + this.getDateTime(), + this.credentialString(), + hash(this.canonicalString(), 'hex'), + ].join('\n') +} + +RequestSigner.prototype.canonicalString = function() { + if (!this.parsedPath) this.prepareRequest() + + var pathStr = this.parsedPath.path, + query = this.parsedPath.query, + headers = this.request.headers, + queryStr = '', + normalizePath = this.service !== 's3', + decodePath = this.service === 's3' || 
this.request.doNotEncodePath, + decodeSlashesInPath = this.service === 's3', + firstValOnly = this.service === 's3', + bodyHash + + if (this.service === 's3' && this.request.signQuery) { + bodyHash = 'UNSIGNED-PAYLOAD' + } else if (this.isCodeCommitGit) { + bodyHash = '' + } else { + bodyHash = headers['X-Amz-Content-Sha256'] || headers['x-amz-content-sha256'] || + hash(this.request.body || '', 'hex') + } + + if (query) { + var reducedQuery = Object.keys(query).reduce(function(obj, key) { + if (!key) return obj + obj[encodeRfc3986Full(key)] = !Array.isArray(query[key]) ? query[key] : + (firstValOnly ? query[key][0] : query[key]) + return obj + }, {}) + var encodedQueryPieces = [] + Object.keys(reducedQuery).sort().forEach(function(key) { + if (!Array.isArray(reducedQuery[key])) { + encodedQueryPieces.push(key + '=' + encodeRfc3986Full(reducedQuery[key])) + } else { + reducedQuery[key].map(encodeRfc3986Full).sort() + .forEach(function(val) { encodedQueryPieces.push(key + '=' + val) }) + } + }) + queryStr = encodedQueryPieces.join('&') + } + if (pathStr !== '/') { + if (normalizePath) pathStr = pathStr.replace(/\/{2,}/g, '/') + pathStr = pathStr.split('/').reduce(function(path, piece) { + if (normalizePath && piece === '..') { + path.pop() + } else if (!normalizePath || piece !== '.') { + if (decodePath) piece = decodeURIComponent(piece).replace(/\+/g, ' ') + path.push(encodeRfc3986Full(piece)) + } + return path + }, []).join('/') + if (pathStr[0] !== '/') pathStr = '/' + pathStr + if (decodeSlashesInPath) pathStr = pathStr.replace(/%2F/g, '/') + } + + return [ + this.request.method || 'GET', + pathStr, + queryStr, + this.canonicalHeaders() + '\n', + this.signedHeaders(), + bodyHash, + ].join('\n') +} + +RequestSigner.prototype.canonicalHeaders = function() { + var headers = this.request.headers + function trimAll(header) { + return header.toString().trim().replace(/\s+/g, ' ') + } + return Object.keys(headers) + .sort(function(a, b) { return a.toLowerCase() < 
b.toLowerCase() ? -1 : 1 }) + .map(function(key) { return key.toLowerCase() + ':' + trimAll(headers[key]) }) + .join('\n') +} + +RequestSigner.prototype.signedHeaders = function() { + return Object.keys(this.request.headers) + .map(function(key) { return key.toLowerCase() }) + .sort() + .join(';') +} + +RequestSigner.prototype.credentialString = function() { + return [ + this.getDate(), + this.region, + this.service, + 'aws4_request', + ].join('/') +} + +RequestSigner.prototype.defaultCredentials = function() { + var env = process.env + return { + accessKeyId: env.AWS_ACCESS_KEY_ID || env.AWS_ACCESS_KEY, + secretAccessKey: env.AWS_SECRET_ACCESS_KEY || env.AWS_SECRET_KEY, + sessionToken: env.AWS_SESSION_TOKEN, + } +} + +RequestSigner.prototype.parsePath = function() { + var path = this.request.path || '/' + + // S3 doesn't always encode characters > 127 correctly and + // all services don't encode characters > 255 correctly + // So if there are non-reserved chars (and it's not already all % encoded), just encode them all + if (/[^0-9A-Za-z;,/?:@&=+$\-_.!~*'()#%]/.test(path)) { + path = encodeURI(decodeURI(path)) + } + + var queryIx = path.indexOf('?'), + query = null + + if (queryIx >= 0) { + query = querystring.parse(path.slice(queryIx + 1)) + path = path.slice(0, queryIx) + } + + this.parsedPath = { + path: path, + query: query, + } +} + +RequestSigner.prototype.formatPath = function() { + var path = this.parsedPath.path, + query = this.parsedPath.query + + if (!query) return path + + // Services don't support empty query string keys + if (query[''] != null) delete query[''] + + return path + '?' 
+ encodeRfc3986(querystring.stringify(query)) +} + +aws4.RequestSigner = RequestSigner + +aws4.sign = function(request, credentials) { + return new RequestSigner(request, credentials).sign() +} + + +/***/ }), + +/***/ 28785: +/***/ ((module) => { + +module.exports = function(size) { + return new LruCache(size) +} + +function LruCache(size) { + this.capacity = size | 0 + this.map = Object.create(null) + this.list = new DoublyLinkedList() +} + +LruCache.prototype.get = function(key) { + var node = this.map[key] + if (node == null) return undefined + this.used(node) + return node.val +} + +LruCache.prototype.set = function(key, val) { + var node = this.map[key] + if (node != null) { + node.val = val + } else { + if (!this.capacity) this.prune() + if (!this.capacity) return false + node = new DoublyLinkedNode(key, val) + this.map[key] = node + this.capacity-- + } + this.used(node) + return true +} + +LruCache.prototype.used = function(node) { + this.list.moveToFront(node) +} + +LruCache.prototype.prune = function() { + var node = this.list.pop() + if (node != null) { + delete this.map[node.key] + this.capacity++ + } +} + + +function DoublyLinkedList() { + this.firstNode = null + this.lastNode = null +} + +DoublyLinkedList.prototype.moveToFront = function(node) { + if (this.firstNode == node) return + + this.remove(node) + + if (this.firstNode == null) { + this.firstNode = node + this.lastNode = node + node.prev = null + node.next = null + } else { + node.prev = null + node.next = this.firstNode + node.next.prev = node + this.firstNode = node + } +} + +DoublyLinkedList.prototype.pop = function() { + var lastNode = this.lastNode + if (lastNode != null) { + this.remove(lastNode) + } + return lastNode +} + +DoublyLinkedList.prototype.remove = function(node) { + if (this.firstNode == node) { + this.firstNode = node.next + } else if (node.prev != null) { + node.prev.next = node.next + } + if (this.lastNode == node) { + this.lastNode = node.prev + } else if (node.next != 
null) { + node.next.prev = node.prev + } +} + + +function DoublyLinkedNode(key, val) { + this.key = key + this.val = val + this.prev = null + this.next = null +} + + +/***/ }), + +/***/ 73015: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const core = __nccwpck_require__(87454); +const exec = __nccwpck_require__(33683); +const io = __nccwpck_require__(27692); +class AzureCLIAuthorizer { + constructor() { + this._token = ''; + this._subscriptionId = ''; + this._cloudSuffixes = {}; + this._cloudEndpoints = {}; + } + static getAuthorizer() { + return __awaiter(this, void 0, void 0, function* () { + if (!this._authorizer) { + this._authorizer = new AzureCLIAuthorizer(); + yield this._authorizer._initialize(); + } + return this._authorizer; + }); + } + get subscriptionID() { + return this._subscriptionId; + } + get baseUrl() { + return this._cloudEndpoints['resourceManager'] || 'https://management.azure.com/'; + } + getCloudSuffixUrl(suffixName) { + return this._cloudSuffixes[suffixName]; + } + getCloudEndpointUrl(endpointName) { + return this._cloudEndpoints[endpointName]; + } + getToken(force, args) { + return __awaiter(this, void 0, void 0, function* () { + if (!this._token || force) { + try { + let azAccessToken = 
JSON.parse(yield AzureCLIAuthorizer.executeAzCliCommand('account get-access-token', !!args ? args : [])); + core.setSecret(azAccessToken); + this._token = azAccessToken['accessToken']; + } + catch (error) { + console.log('Failed to fetch Azure access token'); + throw error; + } + } + return this._token; + }); + } + static executeAzCliCommand(command, args) { + return __awaiter(this, void 0, void 0, function* () { + let azCliPath = yield AzureCLIAuthorizer._getAzCliPath(); + let stdout = ''; + let stderr = ''; + try { + core.debug(`"${azCliPath}" ${command}`); + yield exec.exec(`"${azCliPath}" ${command}`, args, { + silent: true, + listeners: { + stdout: (data) => { + stdout += data.toString(); + }, + stderr: (data) => { + stderr += data.toString(); + } + } + }); + } + catch (error) { + throw new Error(stderr); + } + return stdout; + }); + } + static _getAzCliPath() { + return __awaiter(this, void 0, void 0, function* () { + if (!this._azCliPath) { + this._azCliPath = yield io.which('az', true); + } + return this._azCliPath; + }); + } + _initialize() { + return __awaiter(this, void 0, void 0, function* () { + let azAccountDetails = JSON.parse(yield AzureCLIAuthorizer.executeAzCliCommand('account show')); + let azCloudDetails = JSON.parse(yield AzureCLIAuthorizer.executeAzCliCommand('cloud show')); + this._subscriptionId = azAccountDetails['id']; + this._cloudSuffixes = azCloudDetails['suffixes']; + this._cloudEndpoints = azCloudDetails['endpoints']; + }); + } +} +exports.AzureCLIAuthorizer = AzureCLIAuthorizer; + + +/***/ }), + +/***/ 52825: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? 
value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const core = __nccwpck_require__(87454); +const AzureCLIAuthorizer_1 = __nccwpck_require__(73015); +class AuthorizerFactory { + static getAuthorizer() { + return __awaiter(this, void 0, void 0, function* () { + core.debug('try-get AzureCLIAuthorizer'); + try { + return yield AzureCLIAuthorizer_1.AzureCLIAuthorizer.getAuthorizer(); + } + catch (error) { + core.debug(error); + throw new Error("No credentails found. Add an Azure login action before this action. For more details refer https://github.com/azure/login"); + } + }); + } +} +exports.AuthorizerFactory = AuthorizerFactory; + + +/***/ }), + +/***/ 66120: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? 
resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const core = __nccwpck_require__(87454); +const WebClient_1 = __nccwpck_require__(93422); +class ApiResult { + constructor(error, result, request, response) { + this.error = error; + this.result = result; + this.request = request; + this.response = response; + } +} +exports.ApiResult = ApiResult; +class AzureError { +} +exports.AzureError = AzureError; +function ToError(response) { + let error = new AzureError(); + error.statusCode = response.statusCode; + error.message = response.body; + if (response.body && response.body.error) { + error.code = response.body.error.code; + error.message = response.body.error.message; + error.details = response.body.error.details; + core.error(error.message); + } + return error; +} +exports.ToError = ToError; +class ServiceClient { + constructor(authorizer, timeout) { + this._webClient = new WebClient_1.WebClient(); + this._authorizer = authorizer; + this.subscriptionId = this._authorizer.subscriptionID; + this.baseUrl = this._authorizer.baseUrl; + this.longRunningOperationRetryTimeout = !!timeout ? 
timeout : 0; // In minutes + } + getRequestUri(uriFormat, parameters, queryParameters, apiVersion) { + return this.getRequestUriForbaseUrl(this.baseUrl, uriFormat, parameters, queryParameters, apiVersion); + } + getRequestUriForbaseUrl(baseUrl, uriFormat, parameters, queryParameters, apiVersion) { + let requestUri = baseUrl + uriFormat; + requestUri = requestUri.replace('{subscriptionId}', encodeURIComponent(this.subscriptionId)); + for (let key in parameters) { + requestUri = requestUri.replace(key, encodeURIComponent(parameters[key])); + } + // trim all duplicate forward slashes in the url + let regex = /([^:]\/)\/+/gi; + requestUri = requestUri.replace(regex, '$1'); + // process query paramerters + queryParameters = queryParameters || []; + if (!!apiVersion) { + queryParameters.push('api-version=' + encodeURIComponent(apiVersion)); + } + if (queryParameters.length > 0) { + requestUri += '?' + queryParameters.join('&'); + } + return requestUri; + } + beginRequest(request, tokenArgs) { + return __awaiter(this, void 0, void 0, function* () { + let token = yield this._authorizer.getToken(false, tokenArgs); + request.headers = request.headers || {}; + request.headers['Authorization'] = `Bearer ${token}`; + request.headers['Content-Type'] = 'application/json; charset=utf-8'; + let httpResponse = null; + try { + httpResponse = yield this._webClient.sendRequest(request); + if (httpResponse.statusCode === 401 && httpResponse.body && httpResponse.body.error && httpResponse.body.error.code === "ExpiredAuthenticationToken") { + // The access token might have expire. Re-issue the request after refreshing the token. 
+ token = yield this._authorizer.getToken(true, tokenArgs); + request.headers['Authorization'] = `Bearer ${token}`; + httpResponse = yield this._webClient.sendRequest(request); + } + } + catch (exception) { + let exceptionString = exception.toString(); + if (exceptionString.indexOf("Hostname/IP doesn't match certificates's altnames") != -1 + || exceptionString.indexOf("unable to verify the first certificate") != -1 + || exceptionString.indexOf("unable to get local issuer certificate") != -1) { + core.warning("You're probably using a self-signed certificate in the SSL certificate validation chain. To resolve them you need to export a variable named ACTIONS_AZURE_REST_IGNORE_SSL_ERRORS to the value true."); + throw exception; + } + } + return httpResponse; + }); + } + accumulateResultFromPagedResult(nextLinkUrl) { + return __awaiter(this, void 0, void 0, function* () { + let result = []; + while (!!nextLinkUrl) { + let nextRequest = { + method: 'GET', + uri: nextLinkUrl + }; + let response = yield this.beginRequest(nextRequest); + if (response.statusCode == 200 && response.body) { + if (response.body.value) { + result = result.concat(response.body.value); + } + nextLinkUrl = response.body.nextLink; + } + else { + return new ApiResult(ToError(response)); + } + } + return new ApiResult(null, result); + }); + } +} +exports.ServiceClient = ServiceClient; + + +/***/ }), + +/***/ 74963: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +"use strict"; + +Object.defineProperty(exports, "__esModule", ({ value: true })); +const HttpClient_1 = __nccwpck_require__(85448); +class RequestClient { + constructor() { + // Singleton pattern: block from public construction + RequestClient._options = {}; + let ignoreSslErrors = `${process.env.ACTIONS_AZURE_REST_IGNORE_SSL_ERRORS}`; + RequestClient._options.ignoreSslError = !!ignoreSslErrors && ignoreSslErrors.toLowerCase() === "true"; + RequestClient._instance = new 
HttpClient_1.HttpClient(`${process.env.AZURE_HTTP_USER_AGENT}`, undefined, RequestClient._options); + } + static GetInstance() { + if (RequestClient._instance === undefined) { + new RequestClient(); + } + return RequestClient._instance; + } + static SetOptions(newOptions) { + RequestClient._options = newOptions; + } +} +exports.RequestClient = RequestClient; + + +/***/ }), + +/***/ 93422: +/***/ (function(__unused_webpack_module, exports, __nccwpck_require__) { + +"use strict"; + +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +Object.defineProperty(exports, "__esModule", ({ value: true })); +const core = __nccwpck_require__(87454); +const fs = __nccwpck_require__(57147); +const RequestClient_1 = __nccwpck_require__(74963); +const DEFAULT_RETRIABLE_ERROR_CODES = ["ETIMEDOUT", "ECONNRESET", "ENOTFOUND", "ESOCKETTIMEDOUT", "ECONNREFUSED", "EHOSTUNREACH", "EPIPE", "EA_AGAIN"]; +const DEFAULT_RETRIABLE_STATUS_CODES = [408, 409, 500, 502, 503, 504]; +const DEFAULT_RETRY_COUNT = 5; +const DEFAULT_RETRY_INTERVAL_SECONDS = 2; +class WebClient { + constructor() { + this._httpClient = RequestClient_1.RequestClient.GetInstance(); + } + sendRequest(request, options) { + return __awaiter(this, void 0, void 0, function* () { + let i = 0; + let retryCount = options && options.retryCount ? 
options.retryCount : DEFAULT_RETRY_COUNT; + let retryIntervalInSeconds = options && options.retryIntervalInSeconds ? options.retryIntervalInSeconds : DEFAULT_RETRY_INTERVAL_SECONDS; + let retriableErrorCodes = options && options.retriableErrorCodes ? options.retriableErrorCodes : DEFAULT_RETRIABLE_ERROR_CODES; + let retriableStatusCodes = options && options.retriableStatusCodes ? options.retriableStatusCodes : DEFAULT_RETRIABLE_STATUS_CODES; + let timeToWait = retryIntervalInSeconds; + while (true) { + try { + if (request.body && typeof (request.body) !== 'string' && !request.body["readable"]) { + request.body = fs.createReadStream(request.body["path"]); + } + let response = yield this._sendRequestInternal(request); + if (retriableStatusCodes.indexOf(response.statusCode) != -1 && ++i < retryCount) { + core.debug(`Encountered a retriable status code: ${response.statusCode}. Message: '${response.statusMessage}'.`); + yield this._sleep(timeToWait); + timeToWait = timeToWait * retryIntervalInSeconds + retryIntervalInSeconds; + continue; + } + return response; + } + catch (error) { + if (retriableErrorCodes.indexOf(error.code) != -1 && ++i < retryCount) { + core.debug(`Encountered a retriable error:${error.code}. Message: ${error.message}.`); + yield this._sleep(timeToWait); + timeToWait = timeToWait * retryIntervalInSeconds + retryIntervalInSeconds; + } + else { + if (error.code) { + core.error(error.code); + } + throw error; + } + } + } + }); + } + _sendRequestInternal(request) { + return __awaiter(this, void 0, void 0, function* () { + core.debug(`[${request.method}] ${request.uri}`); + let response = yield this._httpClient.request(request.method, request.uri, request.body || '', request.headers); + if (!response) { + throw new Error(`Unexpected end of request. 
Http request: [${request.method}] ${request.uri} returned a null Http response.`); + } + return yield this._toWebResponse(response); + }); + } + _toWebResponse(response) { + return __awaiter(this, void 0, void 0, function* () { + let resBody; + let body = yield response.readBody(); + if (!!body) { + try { + resBody = JSON.parse(body); + } + catch (error) { + core.debug(`Could not parse response body.`); + core.debug(JSON.stringify(error)); + } + } + return { + statusCode: response.message.statusCode, + statusMessage: response.message.statusMessage, + headers: response.message.headers, + body: resBody || body + }; + }); + } + _sleep(sleepDurationInSeconds) { + return new Promise((resolve) => { + setTimeout(resolve, sleepDurationInSeconds * 1000); + }); + } +} +exports.WebClient = WebClient; + + +/***/ }), + +/***/ 10732: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var exports = module.exports; + +/** +* Creates a connection string that can be used to create a service which runs on the storage emulator. The emulator must be downloaded separately. +* +* @param {string} [proxyUri] The proxyUri. By default, http://127.0.0.1 +* @return {string} A connection string representing the development storage credentials. 
+* @example +* var azure = require('azure-storage'); +* var devStoreCreds = azure.generateDevelopmentStorageCredentials(); +* var blobService = azure.createBlobService(devStoreCreds); +*/ +exports.generateDevelopmentStorageCredentials = function (proxyUri) { + var devStore = 'UseDevelopmentStorage=true;'; + if(proxyUri){ + devStore += 'DevelopmentStorageProxyUri=' + proxyUri; + } + + return devStore; +}; + +/** + * Table client exports. + * @ignore + */ +var TableService = __nccwpck_require__(8920); + +exports.TableService = TableService; +exports.TableQuery = __nccwpck_require__(13054); +exports.TableBatch = __nccwpck_require__(72394); +exports.TableUtilities = __nccwpck_require__(581); + +/** +* Creates a new {@link TableService} object. +* If no storageaccount or storageaccesskey are provided, the AZURE_STORAGE_CONNECTION_STRING and then the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY +* environment variables will be used. +* +* @param {string} [storageAccountOrConnectionString] The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {string|object} [host] The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @return {TableService} A new TableService object. +* +*/ +exports.createTableService = function (storageAccountOrConnectionString, storageAccessKey, host) { + return new TableService(storageAccountOrConnectionString, storageAccessKey, host); +}; + +/** +* Creates a new {@link TableService} object using the host Uri and the SAS credentials provided. +* +* @param {string|object} host The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {string} sasToken The Shared Access Signature token. 
+* @return {TableService} A new TableService object with the SAS credentials. +*/ +exports.createTableServiceWithSas = function (hostUri, sasToken) { + return new TableService(null, null, hostUri, sasToken); +}; + +/** + * Blob client exports. + * @ignore + */ +var BlobService = __nccwpck_require__(50716); + +exports.BlobService = BlobService; +exports.BlobUtilities = __nccwpck_require__(89959); + +/** +* Creates a new {@link BlobService} object. +* If no storageaccount or storageaccesskey are provided, the AZURE_STORAGE_CONNECTION_STRING and then the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY +* environment variables will be used. +* +* @param {string} storageAccountOrConnectionString The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {string|object} [host] The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @return {BlobService} A new BlobService object. +*/ +exports.createBlobService = function (storageAccountOrConnectionString, storageAccessKey, host) { + return new BlobService(storageAccountOrConnectionString, storageAccessKey, host, null); +}; + +/** +* Creates a new {@link BlobService} object using the host Uri and the SAS credentials provided. +* +* @param {string|object} host The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {string} sasToken The Shared Access Signature token. +* @return {BlobService} A new BlobService object with the SAS credentials. +*/ +exports.createBlobServiceWithSas = function (host, sasToken) { + return new BlobService(null, null, host, sasToken); +}; + +/** +* Creates a new {@link BlobService} object using the host Uri and the {@link TokenCredential} provided, which supports OAuth. 
+* +* @param {string|object} host The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {TokenCredential} tokenCredential The token credential object. +* @return {BlobService} A new BlobService object with the {@link TokenCredential} object. +* +* @example +* var azure = require('azure-storage'); +* var tokenCredential = new azure.TokenCredential('myOAuthAccessToken'); +* var blobService = azure.createBlobServiceWithTokenCredential('https://account.blob.core.windows.net', tokenCredential); +* tokenCredential.set('updatedOAuthAccessToken'); +*/ +exports.createBlobServiceWithTokenCredential = function (host, tokenCredential) { + return new BlobService(null, null, host, null, null, tokenCredential); +}; + +/** +* Creates a new {@link BlobService} object using the host uri and anonymous access. +* +* @param {string|object} host The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @return {BlobService} A new BlobService object with the anonymous credentials. +*/ +exports.createBlobServiceAnonymous = function (host) { + return new BlobService(null, null, host, null); +}; + +/** + * File client exports. + * @ignore + */ +var FileService = __nccwpck_require__(50007); + +exports.FileService = FileService; +exports.FileUtilities = __nccwpck_require__(83656); + +/** +* Creates a new {@link FileService} object. +* If no storageaccount or storageaccesskey are provided, the AZURE_STORAGE_CONNECTION_STRING and then the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY +* environment variables will be used. +* +* @param {string} storageAccountOrConnectionString The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {string|object} [host] The host address. 
To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @return {FileService} A new FileService object. +*/ +exports.createFileService = function (storageAccountOrConnectionString, storageAccessKey, host) { + return new FileService(storageAccountOrConnectionString, storageAccessKey, host); +}; + +/** +* Creates a new {@link FileService} object using the host Uri and the SAS credentials provided. +* +* @param {string|object} host The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {string} sasToken The Shared Access Signature token. +* @return {FileService} A new FileService object with the SAS credentials. +*/ +exports.createFileServiceWithSas = function (hostUri, sasToken) { + return new FileService(null, null, hostUri, sasToken); +}; + +/** + * Queue client exports. + * @ignore + */ +var QueueService = __nccwpck_require__(67948); + +exports.QueueService = QueueService; +exports.QueueUtilities = __nccwpck_require__(98801); +exports.QueueMessageEncoder = __nccwpck_require__(23474); + +/** +* Creates a new {@link QueueService} object. +* If no storageaccount or storageaccesskey are provided, the AZURE_STORAGE_CONNECTION_STRING and then the AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY +* environment variables will be used. +* +* @param {string} [storageAccountOrConnectionString] The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {string|object} [host] The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @return {QueueService} A new QueueService object. 
+*/ +exports.createQueueService = function (storageAccountOrConnectionString, storageAccessKey, host) { + return new QueueService(storageAccountOrConnectionString, storageAccessKey, host); +}; + +/** +* Creates a new {@link QueueService} object using the host Uri and the SAS credentials provided. +* +* @param {string|object} host The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {string} sasToken The Shared Access Signature token. +* @return {QueueService} A new QueueService object with the SAS credentials. +*/ +exports.createQueueServiceWithSas = function(hostUri, sasToken) { + return new QueueService(null, null, hostUri, sasToken); +}; + +/** +* Creates a new {@link QueueService} object using the host Uri and the {@link TokenCredential} provided, which supports OAuth. +* +* @param {string|object} host The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {TokenCredential} tokenCredential The TokenCredential object. +* @return {QueueService} A new QueueService object with the {@link TokenCredential} object. 
+* +* @example +* var azure = require('azure-storage'); +* var tokenCredential = new azure.TokenCredential('myOAuthAccessToken'); +* var queueService = azure.createQueueServiceWithTokenCredential('https://account.queue.core.windows.net', tokenCredential); +* tokenCredential.set('updatedOAuthAccessToken'); +*/ +exports.createQueueServiceWithTokenCredential = function (host, tokenCredential) { + return new QueueService(null, null, host, null, null, tokenCredential); +}; + +/** +* Account SAS +* @ignore +*/ + +var azureCommon = __nccwpck_require__(18418); +var StorageServiceClient = azureCommon.StorageServiceClient; +var SharedKey = azureCommon.SharedKey; +/** +* Generates an account shared access signature token +* +* @param {string} [storageAccountOrConnectionString] The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {object} sharedAccessPolicy The shared access policy. +* @param {SharedAccessServices} sharedAccessPolicy.AccessPolicy.Services The services (blob, file, queue, table) for a shared access signature associated with this shared access policy. +* Refer to `Constants.AccountSasConstants.Services`. +* @param {SharedAccessResourceTypes} sharedAccessPolicy.AccessPolicy.ResourceTypes The resource type for a shared access signature associated with this shared access policy. +* Refer to `Constants.AccountSasConstants.ResourceTypes`. +* @param {SharedAccessPermissions} sharedAccessPolicy.AccessPolicy.Permissions The permissions for a shared access signature. +* Refer to `Constants.AccountSasConstants.Permissions`. +* @param {date} sharedAccessPolicy.AccessPolicy.Start The time at which the Shared Access Signature becomes valid. +* @param {date} sharedAccessPolicy.AccessPolicy.Expiry The time at which the Shared Access Signature becomes expired. +* @param {string} sharedAccessPolicy.AccessPolicy.IPAddressOrRange The permission type. Refer to `Constants.AccountSasConstants.ResourceTypes`. 
+* @param {string} sharedAccessPolicy.AccessPolicy.Protocols The possible protocols. Refer to `Constants.AccountSasConstants.ResourceTypes`. +*/ +exports.generateAccountSharedAccessSignature = function(storageAccountOrConnectionString, storageAccessKey, sharedAccessAccountPolicy) +{ + var storageSettings = StorageServiceClient.getStorageSettings(storageAccountOrConnectionString, storageAccessKey); + var sharedKey = new SharedKey(storageSettings._name, storageSettings._key); + + return sharedKey.generateAccountSignedQueryString(sharedAccessAccountPolicy); +}; + + +/** +* A callback that returns a response object. +* @callback errorOrResponse +* @param {object} error If an error occurs, will contain information about the error. +* @param {object} response Contains information about the response returned for the operation. +* For example, HTTP status codes and headers. +*/ + +/** +* A callback that returns result and response objects. +* @callback errorOrResult +* @param {object} error If an error occurs, will contain information about the error. +* @param {object} result The result of the operation. +* @param {object} response Contains information about the response returned for the operation. +* For example, HTTP status codes and headers. +*/ + + +/** +* Specifying conditional headers for blob service operations. See http://msdn.microsoft.com/en-us/library/dd179371.aspx for more information. +* @typedef {object} AccessConditions +* @property {string} EtagMatch If the ETag for the blob matches the specified ETag. +* Specify the wildcard character (*) to perform the operation only if the resource does exist, and fail the operation if it does not exist. +* @property {string} EtagNonMatch If the ETag for the blob does not match the specified ETag. +* Specify the wildcard character (*) to perform the operation only if the resource does not exist, and fail the operation if it does exist. 
+* @property {Date|string} DateModifedSince If the blob has been modified since the specified date. +* @property {Date|string} DateUnModifiedSince If the blob has not been modified since the specified date. +* @property {Number|string} SequenceNumberLessThanOrEqual If the blob's sequence number is less than or equal to the specified value. +* For Put Page operation only. See https://msdn.microsoft.com/en-us/library/azure/ee691975.aspx for more information. +* @property {Number|string} SequenceNumberLessThan If the blob's sequence number is less than the specified value. +* For Put Page operation only. See https://msdn.microsoft.com/en-us/library/azure/ee691975.aspx for more information. +* @property {Number|string} SequenceNumberEqual If the blob's sequence number is equal to the specified value. +* For Put Page operation only. See https://msdn.microsoft.com/en-us/library/azure/ee691975.aspx for more information. +* @property {Number|string} MaxBlobSize If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than the specified value. +* For Append Block operation only. See https://msdn.microsoft.com/en-us/library/mt427365.aspx for more information. +* @property {Number|string} MaxAppendPosition If the append position is equal to the specified value. +* For Append Block operation only. See https://msdn.microsoft.com/en-us/library/mt427365.aspx for more information. +*/ + +/** +* The properties of a storage service, including properties of Storage Analytics and CORS (Cross-Origin Resource Sharing) rules. +* @typedef {object} ServiceProperties +* @property {string} DefaultServiceVersion The default version of Storage Analytics currently in use. +* @property {LoggingProperties} Logging The Logging settings. +* @property {MetricsProperties} HourMetrics The HourMetrics settings provide a summary of request statistics grouped by API in hourly aggregates. 
+* @property {MetricsProperties} MinuteMetrics The HourMetrics settings provide request statistics grouped by API for each minute. +* @property {object} Cors Groups all CORS rules. +* @property {CorsRule[]} Cors.CorsRules Groups settings for a `[CORS rule]{@link CorsRule}`. +*/ + +/** +* The properties of a storage account. +* @typedef {object} AccountProperties +* @property {string} SkuName The header that specifies storage SKU, also known as account type. +* @property {string} AccountKind The header that describes the flavour of the storage account, also known as account kind. +*/ + +/** +* The properties of a blob storage service, including properties of Storage Analytics, CORS (Cross-Origin Resource Sharing) rules and Static Webiste configurations. +* @typedef {object} BlobServiceProperties +* @property {string} DefaultServiceVersion The default version of Storage Analytics currently in use. +* @property {LoggingProperties} Logging The Logging settings. +* @property {MetricsProperties} HourMetrics The HourMetrics settings provide a summary of request statistics grouped by API in hourly aggregates. +* @property {MetricsProperties} MinuteMetrics The HourMetrics settings provide request statistics grouped by API for each minute. +* @property {StaticWebsiteProperties} StaticWebsite The Azure Static Website settings. +* @property {object} Cors Groups all CORS rules. +* @property {CorsRule[]} Cors.CorsRules Groups settings for a `[CORS rule]{@link CorsRule}`. +*/ + +/** +* The Azure Static Website settings. +* @typedef {object} StaticWebsiteProperties +* @property {boolean} Enabled Whether feature of Static Website is enabled. +* @property {string} IndexDocument Indicates index document page path. +* @property {string} ErrorDocument404Path Indicates 404 document page path. +*/ + +/** +* The Azure Analytics logging settings. +* @typedef {object} LoggingProperties +* @property {string} Version The version of Storage Analytics currently in use for logging. 
+* @property {boolean} Delete Indicates whether delete requests are being logged. +* @property {boolean} Read Indicates whether read requests are being logged. +* @property {boolean} Write Indicates whether write requests are being logged. +* @property {RetentionPolicy} RetentionPolicy The retention policy of the log data. +*/ + +/** +* The setting of Azure Analytics summary of request stastics. +* @typedef {object} MetricsProperties +* @property {string} Version The version of Storage Analytics currently in use for hour metrics. +* @property {string} Enabled Indicates whether metrics are enabled +* @property {boolean} IncludeAPIs Indicates whether metrics generate summary statistics for called API operations. +* @property {RetentionPolicy} RetentionPolicy The retention policy of the metrics data. +*/ + +/** +* The CORS rule of a storage service. +* @typedef {object} CorsRule +* @property {string[]} AllowedMethods A list of HTTP methods that are allowed to be executed by the origin. For Azure Storage, permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. +* @property {string[]} AllowedOrigins A list of origin domains that are allowed via CORS, or "*" if all domains are allowed. +* @property {string[]} AllowedHeaders A list of headers allowed to be part of the cross-origin request. +* @property {string[]} ExposedHeaders A list of response headers to expose to CORS clients. +* @property {number} MaxAgeInSeconds The number of seconds that the client/browser should cache a preflight response. +*/ + +/** +* The Azure Analytics logging or metrics retention policy. +* @typedef {object} RetentionPolicy +* @property {boolean} Enabled Indicates whether a retention policy is enabled for the storage service. +* @property {number} Days Indicates the number of days that logging data is retained. All data older than this value will be deleted. +*/ + +/** +* The access policy. +* @typedef {object} AccessPolicy +* @property {string} Permissions The permission type. 
+* @property {Date} Start The time at which the access policy becomes valid. +* @property {Date} Expiry The time at which the access policy becomes expired. +* @property {string} IPAddressOrRange An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive. +* @property {string} Protocols The protocols permitted for a request made with the SAS. +* @property {string} Services The services (blob, file, queue, table) for a shared access signature associated with this shared access policy. +* @property {string} ResourceTypes The resource type for a shared access signature associated with this shared access policy. +*/ + +/** +* The service statistics. +* @typedef {object} ServiceStats +* @property {object} GeoReplication The geo replication stastics. +* @property {string} GeoReplication.Status The status of the secondary location. +* @property {Date} GeoReplication.LastSyncTime A GMT date/time value, to the second. +* All primary writes preceding this value are guaranteed to be available for read operations at the secondary. +* Primary writes after this point in time may or may not be available for reads. +*/ + +/** +* The range. +* @typedef {object} Range +* @property {number} start The start of the range. +* @property {number} end The end of the range. +*/ + +/** +* The range diff. Refer to https://msdn.microsoft.com/en-us/library/azure/mt736912.aspx +* @typedef {object} RangeDiff +* @property {number} start The start of the range. +* @property {number} end The end of the range. +* @property {boolean} isCleared If the range is cleared or not. 
+ +*/ + +exports.Constants = azureCommon.Constants; +exports.StorageUtilities = azureCommon.StorageUtilities; +exports.AccessCondition = azureCommon.AccessCondition; + +exports.SR = azureCommon.SR; +exports.StorageServiceClient = StorageServiceClient; +exports.Logger = azureCommon.Logger; +exports.WebResource = azureCommon.WebResource; +exports.Validate = azureCommon.validate; +exports.date = azureCommon.date; +exports.TokenCredential = azureCommon.TokenCredential; + +// Other filters +exports.LinearRetryPolicyFilter = azureCommon.LinearRetryPolicyFilter; +exports.ExponentialRetryPolicyFilter = azureCommon.ExponentialRetryPolicyFilter; +exports.RetryPolicyFilter = azureCommon.RetryPolicyFilter; + +/***/ }), + +/***/ 82187: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var exports = module.exports; + +var azureutil = __nccwpck_require__(3396); + +__nccwpck_require__(53035); + +var nodeVersion = azureutil.getNodeVersion(); +if (nodeVersion.major === 0 && nodeVersion.minor > 8 && !(nodeVersion.minor > 10 || (nodeVersion.minor === 10 && nodeVersion.patch >= 3))) { + throw new Error('The Microsoft Azure node SDK does not work with node versions > 0.9.0 and < 0.10.3. 
Please upgrade to node >= 0.10.3'); +} + +exports.xmlbuilder = __nccwpck_require__(25514); +exports.xml2js = __nccwpck_require__(37977); + +exports.Logger = __nccwpck_require__(98256); +exports.WebResource = __nccwpck_require__(72730); + +// Services +exports.StorageServiceClient = __nccwpck_require__(22275); + +// Models +exports.ServicePropertiesResult = __nccwpck_require__(92434); +exports.ServiceStatsParser = __nccwpck_require__(54525); +exports.AclResult = __nccwpck_require__(6319); +exports.TokenCredential = __nccwpck_require__(39649); + +// Filters +exports.LinearRetryPolicyFilter = __nccwpck_require__(1432); +exports.ExponentialRetryPolicyFilter = __nccwpck_require__(8360); +exports.RetryPolicyFilter = __nccwpck_require__(39744); + +// Signing +exports.SharedAccessSignature = __nccwpck_require__(68327); +exports.SharedKey = __nccwpck_require__(26130); + +// Streams +exports.BatchOperation = __nccwpck_require__(36587); +exports.ChunkAllocator = __nccwpck_require__(13548); +exports.ChunkStream = __nccwpck_require__(40473); +exports.ChunkStreamWithStream = __nccwpck_require__(18802); +exports.SpeedSummary = __nccwpck_require__(39201); +exports.BufferStream = __nccwpck_require__(63615); + +// Utilities +exports.Constants = __nccwpck_require__(60658); +exports.SR = __nccwpck_require__(13497); +exports.date = __nccwpck_require__(24018); +exports.ISO8061Date = __nccwpck_require__(88439); +exports.util = __nccwpck_require__(3396); +exports.validate = __nccwpck_require__(51046); +exports.StorageUtilities = __nccwpck_require__(68003); +exports.AccessCondition = __nccwpck_require__(60913); + +/***/ }), + +/***/ 18418: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var azureCommon = __nccwpck_require__(82187); + +// Streams +azureCommon.FileReadStream = __nccwpck_require__(51477); + +module.exports = azureCommon; + +/***/ }), + +/***/ 98256: +/***/ ((module) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/** +* Creates a new Logger object +* @class +* The Logger class is used to write log information. +* +* @constructor +* +* @param {string} [level] The log level. Refer to Logger.LogLevels. +* @param {object} [loggerFunction] The function to write log information. +*/ +function Logger(level, loggerFunction) { + /** + * The log level. Refer to the Logger.LogLevels for available log levels. + * @name Logger#level + * @type {string} + * @see Logger.LogLevels + */ + this.level = level; + + this.loggerFunction = loggerFunction; + + if (!this.loggerFunction) { + this.loggerFunction = this.defaultLoggerFunction; + } +} + +/** +* The available log levels. 
+* +* @const +* @enum {string} +*/ +Logger.LogLevels = { + /** + * System is unusable. + */ + EMERGENCY: 'emergency', + + /** + * Action must be taken immediately. + */ + ALERT : 'alert', + + /** + * Critical condition. + */ + CRITICAL : 'critical', + + /** + * Error condition. + */ + ERROR : 'error', + + /** + * Warning condition. + */ + WARNING : 'warning', + + /** + * Normal but significant condition. + */ + NOTICE : 'notice', + + /** + * Purely informational message. + */ + INFO : 'info', + + /** + * Application debug messages. + */ + DEBUG : 'debug' +}; + +Logger.logPriority = [ + Logger.LogLevels.EMERGENCY, + Logger.LogLevels.ALERT, + Logger.LogLevels.CRITICAL, + Logger.LogLevels.ERROR, + Logger.LogLevels.WARNING, + Logger.LogLevels.NOTICE, + Logger.LogLevels.INFO, + Logger.LogLevels.DEBUG +]; + +Logger.prototype.log = function (level, msg) { + this.loggerFunction(level, msg); +}; + +Logger.prototype.emergency = function(msg) { + this.log(Logger.LogLevels.EMERGENCY, msg); +}; + +Logger.prototype.critical = function(msg) { + this.log(Logger.LogLevels.CRITICAL, msg); +}; + +Logger.prototype.alert = function(msg) { + this.log(Logger.LogLevels.ALERT, msg); +}; + +Logger.prototype.error = function(msg) { + this.log(Logger.LogLevels.ERROR, msg); +}; + +Logger.prototype.warn = function(msg) { + this.log(Logger.LogLevels.WARNING, msg); +}; + +Logger.prototype.notice = function(msg) { + this.log(Logger.LogLevels.NOTICE, msg); +}; + +Logger.prototype.info = function(msg) { + this.log(Logger.LogLevels.INFO, msg); +}; + +Logger.prototype.debug = function(msg) { + this.log(Logger.LogLevels.DEBUG, msg); +}; + +Logger.prototype.defaultLoggerFunction = function(logLevel , msg) { + var currentLevelIndex = Logger.logPriority.indexOf(this.level); + var logLevelIndex = Logger.logPriority.indexOf(logLevel); + var time = new Date(); + var timeStamp = time.toISOString(); + if (logLevelIndex <= currentLevelIndex) { + console.log('[' + timeStamp + ']' + this.level + ' : ' + msg); + } 
+}; + +module.exports = Logger; + + +/***/ }), + +/***/ 12528: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var util = __nccwpck_require__(73837); +var _ = __nccwpck_require__(7404); + +function captureStackTrace(targetObject, constructorOpt) { + if (Error.captureStackTrace) { + Error.captureStackTrace(targetObject, constructorOpt); + } +} + +function ArgumentError(argumentName, message) { + captureStackTrace(this, this.constructor); + this.name = this.constructor.name; + this.argumentName = argumentName; + this.message = message || util.format('Invalid or missing argument supplied: %s', argumentName); +} +util.inherits(ArgumentError, Error); + +function ArgumentNullError(argumentName, message) { + captureStackTrace(this, this.constructor); + this.name = this.constructor.name; + this.argumentName = argumentName; + this.message = message || util.format('Missing argument: %s', argumentName); +} + +util.inherits(ArgumentNullError, Error); + +function StorageError(message, properties) { + captureStackTrace(this, this.constructor); + this.name = this.constructor.name; + this.message = message; + + if(properties){ + _.extend(this, properties); + } +} + +util.inherits(StorageError, Error); + +function TimeoutError(message) { + captureStackTrace(this, this.constructor); + this.name = 
this.constructor.name; + this.message = message; +} + +util.inherits(TimeoutError, Error); + +module.exports.ArgumentError = ArgumentError; +module.exports.ArgumentNullError = ArgumentNullError; +module.exports.StorageError = StorageError; +module.exports.TimeoutError = TimeoutError; +module.exports.captureStackTrace = captureStackTrace; + +/***/ }), + +/***/ 8360: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + + + +var RetryPolicyFilter = __nccwpck_require__(39744); +/** +* Creates a new 'ExponentialRetryPolicyFilter' instance. +* @class +* The ExponentialRetryPolicyFilter allows you to retry operations, +* using an exponential back-off interval between retries. +* To apply a filter to service operations, use `withFilter` +* and specify the filter to be used when creating a service. +* @constructor +* @param {number} [retryCount=3] The client retry count. +* @param {number} [retryInterval=30000] The client retry interval, in milliseconds. +* @param {number} [minRetryInterval=3000] The minimum retry interval, in milliseconds. +* @param {number} [maxRetryInterval=90000] The maximum retry interval, in milliseconds. 
+* +* @example +* var azure = require('azure-storage'); +* var retryOperations = new azure.ExponentialRetryPolicyFilter(); +* var blobService = azure.createBlobService().withFilter(retryOperations) +*/ +function ExponentialRetryPolicyFilter(retryCount, retryInterval, minRetryInterval, maxRetryInterval) { + this.retryCount = retryCount ? retryCount : ExponentialRetryPolicyFilter.DEFAULT_CLIENT_RETRY_COUNT; + this.retryInterval = retryInterval ? retryInterval : ExponentialRetryPolicyFilter.DEFAULT_CLIENT_RETRY_INTERVAL; + this.minRetryInterval = minRetryInterval ? minRetryInterval : ExponentialRetryPolicyFilter.DEFAULT_CLIENT_MIN_RETRY_INTERVAL; + this.maxRetryInterval = maxRetryInterval ? maxRetryInterval : ExponentialRetryPolicyFilter.DEFAULT_CLIENT_MAX_RETRY_INTERVAL; +} + +/** +* Represents the default client retry interval, in milliseconds. +*/ +ExponentialRetryPolicyFilter.DEFAULT_CLIENT_RETRY_INTERVAL = 1000 * 30; + +/** +* Represents the default client retry count. +*/ +ExponentialRetryPolicyFilter.DEFAULT_CLIENT_RETRY_COUNT = 3; + +/** +* Represents the default maximum retry interval, in milliseconds. +*/ +ExponentialRetryPolicyFilter.DEFAULT_CLIENT_MAX_RETRY_INTERVAL = 1000 * 90; + +/** +* Represents the default minimum retry interval, in milliseconds. +*/ +ExponentialRetryPolicyFilter.DEFAULT_CLIENT_MIN_RETRY_INTERVAL = 1000 * 3; + +/** + * Determines if the operation should be retried and how long to wait until the next retry. + * + * @param {number} statusCode The HTTP status code. + * @param {object} requestOptions The request options. + * @return {retryInfo} Information about whether the operation qualifies for a retry and the retryInterval. + */ +ExponentialRetryPolicyFilter.prototype.shouldRetry = function (statusCode, requestOptions) { + var retryData = (requestOptions && requestOptions.retryContext) ? 
requestOptions.retryContext : {}; + + // Adjust retry interval + var incrementDelta = Math.pow(2, retryData.retryCount) - 1; + var boundedRandDelta = this.retryInterval * 0.8 + Math.floor(Math.random() * (this.retryInterval * 1.2 - this.retryInterval * 0.8)); + incrementDelta *= boundedRandDelta; + + retryData.retryInterval = Math.min(this.minRetryInterval + incrementDelta, this.maxRetryInterval); + + return RetryPolicyFilter._shouldRetryOnError(statusCode, requestOptions); +}; + +/** +* Handles an operation with an exponential retry policy. +* +* @param {Object} requestOptions The original request options. +* @param {function} next The next filter to be handled. +*/ +ExponentialRetryPolicyFilter.prototype.handle = function (requestOptions, next) { + RetryPolicyFilter._handle(this, requestOptions, next); +}; + +module.exports = ExponentialRetryPolicyFilter; + + +/***/ }), + +/***/ 1432: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + + +var RetryPolicyFilter = __nccwpck_require__(39744); + +/** +* Creates a new LinearRetryPolicyFilter instance. +* @class +* The LinearRetryPolicyFilter allows you to retry operations, +* using an linear back-off interval between retries. +* To apply a filter to service operations, use `withFilter` +* and specify the filter to be used when creating a service. 
+* @constructor +* @param {number} [retryCount=3] The client retry count. +* @param {number} [retryInterval=30000] The client retry interval, in milliseconds. +* +* @example +* var azure = require('azure-storage'); +* var retryOperations = new azure.LinearRetryPolicyFilter(); +* var blobService = azure.createBlobService().withFilter(retryOperations) +*/ +function LinearRetryPolicyFilter(retryCount, retryInterval) { + this.retryCount = retryCount ? retryCount : LinearRetryPolicyFilter.DEFAULT_CLIENT_RETRY_COUNT; + this.retryInterval = retryInterval ? retryInterval : LinearRetryPolicyFilter.DEFAULT_CLIENT_RETRY_INTERVAL; +} + +/** +* Represents the default client retry interval, in milliseconds. +*/ +LinearRetryPolicyFilter.DEFAULT_CLIENT_RETRY_INTERVAL = 1000 * 30; + +/** +* Represents the default client retry count. +*/ +LinearRetryPolicyFilter.DEFAULT_CLIENT_RETRY_COUNT = 3; + +/** +* Determines if the operation should be retried and how long to wait until the next retry. +* + * @param {number} statusCode The HTTP status code. + * @param {object} requestOptions The request options. + * @return {retryInfo} Information about whether the operation qualifies for a retry and the retryInterval. +*/ +LinearRetryPolicyFilter.prototype.shouldRetry = function (statusCode, requestOptions) { + var retryData = (requestOptions && requestOptions.retryContext) ? requestOptions.retryContext : {}; + retryData.retryInterval = this.retryInterval; + + return RetryPolicyFilter._shouldRetryOnError(statusCode, requestOptions); +}; + +/** +* Handles an operation with a linear retry policy. +* +* @param {Object} requestOptions The original request options. +* @param {function} next The next filter to be handled. 
+*/ +LinearRetryPolicyFilter.prototype.handle = function (requestOptions, next) { + RetryPolicyFilter._handle(this, requestOptions, next); +}; + +module.exports = LinearRetryPolicyFilter; + + +/***/ }), + +/***/ 39744: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var azureutil = __nccwpck_require__(3396); +var Constants = __nccwpck_require__(60658); +var StorageUtilities = __nccwpck_require__(68003); +var extend = (__nccwpck_require__(73837)._extend); + +/** +* Creates a new RetryPolicyFilter instance. +* @class +* The RetryPolicyFilter allows you to retry operations, +* using a custom retry policy. Users are responsible to +* define the shouldRetry method. +* To apply a filter to service operations, use `withFilter` +* and specify the filter to be used when creating a service. +* @constructor +* @param {number} [retryCount=30000] The client retry count. +* @param {number} [retryInterval=3] The client retry interval, in milliseconds. 
+* +* @example +* var azure = require('azure-storage'); +* var retryPolicy = new azure.RetryPolicyFilter(); +* retryPolicy.retryCount = 3; +* retryPolicy.retryInterval = 3000; +* retryPolicy.shouldRetry = function(statusCode, retryContext) { +* +* }; +* var blobService = azure.createBlobService().withFilter(retryPolicy); +*/ +function RetryPolicyFilter(retryCount, retryInterval) { + this.retryCount = retryCount ? retryCount : RetryPolicyFilter.DEFAULT_CLIENT_RETRY_COUNT; + this.retryInterval = retryInterval ? retryInterval : RetryPolicyFilter.DEFAULT_CLIENT_RETRY_INTERVAL; +} + +/** +* Represents the default client retry interval, in milliseconds. +*/ +RetryPolicyFilter.DEFAULT_CLIENT_RETRY_INTERVAL = 1000 * 30; + +/** +* Represents the default client retry count. +*/ +RetryPolicyFilter.DEFAULT_CLIENT_RETRY_COUNT = 3; + +/** +* Handles an operation with a retry policy. +* +* @param {Object} requestOptions The original request options. +* @param {function} next The next filter to be handled. +*/ +RetryPolicyFilter.prototype.handle = function (requestOptions, next) { + RetryPolicyFilter._handle(this, requestOptions, next); +}; + +/** +* Handles an operation with a retry policy. +* +* @param {Object} requestOptions The original request options. +* @param {function} next The next filter to be handled. 
+*/ +RetryPolicyFilter._handle = function (self, requestOptions, next) { + + var retryRequestOptions = extend({}, requestOptions); + + // Initialize retryContext because that will be passed to the shouldRetry method which users will implement + retryRequestOptions.retryContext = { + retryCount: 0, + error: null, + retryInterval: retryRequestOptions.retryInterval, + locationMode: retryRequestOptions.locationMode, + currentLocation: retryRequestOptions.currentLocation + }; + + var lastPrimaryAttempt; + var lastSecondaryAttempt; + var operation = function () { + // retry policies dont really do anything to the request options + // so move on to next + if (next) { + next(retryRequestOptions, function (returnObject, finalCallback, nextPostCallback) { + // Previous operation ended so update the retry data + if (returnObject.error) { + if (retryRequestOptions.retryContext.error) { + returnObject.error.innerError = retryRequestOptions.retryContext.error; + } + + retryRequestOptions.retryContext.error = returnObject.error; + } + + // If a request sent to the secondary location fails with 404 (Not Found), it is possible + // that the resource replication is not finished yet. So, in case of 404 only in the secondary + // location, the failure should still be retryable. + var secondaryNotFound = (retryRequestOptions.currentLocation === Constants.StorageLocation.SECONDARY) && ((returnObject.response && returnObject.response.statusCode === 404) || (returnObject.error && returnObject.error.code === 'ENOTFOUND')); + + var notExceedMaxRetryCount = retryRequestOptions.retryContext.retryCount ? retryRequestOptions.retryContext.retryCount <= self.retryCount : true; + var retryInfo = self.shouldRetry(secondaryNotFound ? 500 : (azureutil.objectIsNull(returnObject.response) ? 
306 : returnObject.response.statusCode), retryRequestOptions); + retryRequestOptions.retryContext.retryCount++; + + if (retryInfo.ignore) { + returnObject.error = null; + } + + // If the custom retry logic(shouldRetry) does not return a targetLocation, calculate based on the previous location and locationMode. + if(azureutil.objectIsNull(retryInfo.targetLocation)) { + retryInfo.targetLocation = azureutil.getNextLocation(retryRequestOptions.currentLocation, retryRequestOptions.locationMode); + } + + // If the custom retry logic(shouldRetry) does not return a retryInterval, try to set it to the value on the instance if it is available. Otherwise, the default(30000) will be used. + if(azureutil.objectIsNull(retryInfo.retryInterval)) { + retryInfo.retryInterval = self.retryInterval; + } + + // Only in the case of success from server but client side failure like MD5 or length mismatch, returnObject.retryable has a value(we explicitly set it to false). + // In this case, we should not retry the request. + // If the output stream already get sent to server and get error back, + // we should NOT retry within the SDK as the stream data is not valid anymore if we retry directly. 
+ if ( + !returnObject.outputStreamSent && returnObject.error && azureutil.objectIsNull(returnObject.retryable) && notExceedMaxRetryCount && + ( + (!azureutil.objectIsNull(returnObject.response) && retryInfo.retryable) || + ( + returnObject.error.code === 'ECONNREFUSED' || + returnObject.error.code === 'ETIMEDOUT' || + returnObject.error.code === 'ESOCKETTIMEDOUT' || + returnObject.error.code === 'ECONNRESET' || + returnObject.error.code === 'EAI_AGAIN' || + returnObject.error.message === 'XHR error' // stream-http XHR network error message in browsers + ) + ) + ) { + if (retryRequestOptions.currentLocation === Constants.StorageLocation.PRIMARY) { + lastPrimaryAttempt = returnObject.operationEndTime; + } else { + lastSecondaryAttempt = returnObject.operationEndTime; + } + + // Moreover, in case of 404 when trying the secondary location, instead of retrying on the + // secondary, further requests should be sent only to the primary location, as it most + // probably has a higher chance of succeeding there. + if (secondaryNotFound && (retryRequestOptions.locationMode !== StorageUtilities.LocationMode.SECONDARY_ONLY)) + { + retryInfo.locationMode = StorageUtilities.LocationMode.PRIMARY_ONLY; + retryInfo.targetLocation = Constants.StorageLocation.PRIMARY; + } + + // Now is the time to calculate the exact retry interval. ShouldRetry call above already + // returned back how long two requests to the same location should be apart from each other. + // However, for the reasons explained above, the time spent between the last attempt to + // the target location and current time must be subtracted from the total retry interval + // that ShouldRetry returned. + var lastAttemptTime = retryInfo.targetLocation === Constants.StorageLocation.PRIMARY ? 
lastPrimaryAttempt : lastSecondaryAttempt; + if (!azureutil.objectIsNull(lastAttemptTime)) { + var sinceLastAttempt = new Date().getTime() - lastAttemptTime.getTime(); + if (sinceLastAttempt < 0) { + sinceLastAttempt = 0; + } + + retryRequestOptions.retryInterval = retryInfo.retryInterval - sinceLastAttempt; + } + else { + retryRequestOptions.retryInterval = 0; + } + + if(!azureutil.objectIsNull(retryInfo.locationMode)) { + retryRequestOptions.locationMode = retryInfo.locationMode; + } + + retryRequestOptions.currentLocation = retryInfo.targetLocation; + operation(); + } else { + if (nextPostCallback) { + nextPostCallback(returnObject); + } else if (finalCallback) { + finalCallback(returnObject); + } + } + }); + } + }; + + operation(); +}; + +RetryPolicyFilter._shouldRetryOnError = function (statusCode, requestOptions) { + var retryInfo = (requestOptions && requestOptions.retryContext) ? requestOptions.retryContext : {}; + + // Non-timeout Cases + if (statusCode >= 300 && statusCode != 408) { + // Always no retry on "not implemented" and "version not supported" + if (statusCode == 501 || statusCode == 505) { + retryInfo.retryable = false; + return retryInfo; + } + + // When absorbConditionalErrorsOnRetry is set (for append blob) + if (requestOptions && requestOptions.absorbConditionalErrorsOnRetry) { + if (statusCode == 412) { + // When appending block with precondition failure and their was a server error before, we ignore the error. 
+ if (retryInfo.lastServerError) { + retryInfo.ignore = true; + retryInfo.retryable = true; + } else { + retryInfo.retryable = false; + } + } else if (statusCode >= 500 && statusCode < 600) { + // Retry on the server error + retryInfo.retryable = true; + retryInfo.lastServerError = true; + } + } else if (statusCode < 500) { + // No retry on the client error + retryInfo.retryable = false; + } + } + + return retryInfo; +}; + +module.exports = RetryPolicyFilter; + + +/***/ }), + +/***/ 72730: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var azureutil = __nccwpck_require__(3396); +var SR = __nccwpck_require__(13497); +var Constants = __nccwpck_require__(60658); +var errors = __nccwpck_require__(12528); +var ArgumentError = errors.ArgumentError; +var HeaderConstants = Constants.HeaderConstants; +var HttpConstants = Constants.HttpConstants; +var HttpConstants = Constants.HttpConstants; +var HttpVerbs = HttpConstants.HttpVerbs; + +function encodeSpecialCharacters(path) { + return path.replace(/'/g, '%27'); +} + +/** +* Creates a new WebResource object. +* +* This class provides an abstraction over a REST call by being library / implementation agnostic and wrapping the necessary +* properties to initiate a request. 
+* +* @constructor +*/ +function WebResource() { + this.rawResponse = false; + this.queryString = {}; +} + +/** +* Creates a new put request web resource. +* +* @param {string} path The path for the put operation. +* @return {WebResource} A new webresource with a put operation for the given path. +*/ +WebResource.put = function (path) { + var webResource = new WebResource(); + webResource.path = path ? encodeSpecialCharacters(path) : null; + webResource.method = HttpConstants.HttpVerbs.PUT; + return webResource; +}; + +/** +* Creates a new get request web resource. +* +* @param {string} path The path for the get operation. +* @return {WebResource} A new webresource with a get operation for the given path. +*/ +WebResource.get = function (path) { + var webResource = new WebResource(); + webResource.path = path ? encodeSpecialCharacters(path) : null; + webResource.method = HttpConstants.HttpVerbs.GET; + return webResource; +}; + +/** +* Creates a new head request web resource. +* +* @param {string} path The path for the head operation. +* @return {WebResource} A new webresource with a head operation for the given path. +*/ +WebResource.head = function (path) { + var webResource = new WebResource(); + webResource.path = path ? encodeSpecialCharacters(path) : null; + webResource.method = HttpConstants.HttpVerbs.HEAD; + return webResource; +}; + +/** +* Creates a new delete request web resource. +* +* @param {string} path The path for the delete operation. +* @return {WebResource} A new webresource with a delete operation for the given path. +*/ +WebResource.del = function (path) { + var webResource = new WebResource(); + webResource.path = path ? encodeSpecialCharacters(path) : null; + webResource.method = HttpConstants.HttpVerbs.DELETE; + return webResource; +}; + +/** +* Creates a new post request web resource. +* +* @param {string} path The path for the post operation. +* @return {WebResource} A new webresource with a post operation for the given path. 
+*/ +WebResource.post = function (path) { + var webResource = new WebResource(); + webResource.path = path ? encodeSpecialCharacters(path) : null; + webResource.method = HttpConstants.HttpVerbs.POST; + return webResource; +}; + +/** +* Creates a new merge request web resource. +* +* @param {string} path The path for the merge operation. +* @return {WebResource} A new webresource with a merge operation for the given path. +*/ +WebResource.merge = function (path) { + var webResource = new WebResource(); + webResource.path = path ? encodeSpecialCharacters(path) : null; + webResource.method = HttpConstants.HttpVerbs.MERGE; + return webResource; +}; + +/** +* Specifies a custom property in the web resource. +* +* @param {string} name The property name. +* @param {string} value The property value. +* @return {WebResource} The webresource. +*/ +WebResource.prototype.withProperty = function (name, value) { + if (!this.properties) { + this.properties = {}; + } + + this.properties[name] = value; + + return this; +}; + +/** +* Specifies if the response should be parsed or not. +* +* @param {bool} rawResponse true if the response should not be parsed; false otherwise. +* @return {WebResource} The webresource. +*/ +WebResource.prototype.withRawResponse = function (rawResponse) { + this.rawResponse = rawResponse; + + if (azureutil.objectIsNull(this.rawResponse)) { + this.rawResponse = true; + } + + return this; +}; + +WebResource.prototype.withHeadersOnly = function (headersOnly) { + if (headersOnly !== undefined) { + this.headersOnly = headersOnly; + } else { + this.headersOnly = true; + } + + return this; +}; + +/** +* Adds an optional query string parameter. +* +* @param {Object} name The name of the query string parameter. +* @param {Object} value The value of the query string parameter. +* @param {Object} defaultValue The default value for the query string parameter to be used if no value is passed. +* @return {Object} The web resource. 
+*/ +WebResource.prototype.withQueryOption = function (name, value, defaultValue) { + if (!azureutil.objectIsNull(value)) { + this.queryString[name] = value; + } else if (defaultValue) { + this.queryString[name] = defaultValue; + } + + return this; +}; + +/** +* Adds optional query string parameters. +* +* Additional arguments will be the needles to search in the haystack. +* +* @param {Object} object The haystack of query string parameters. +* @return {Object} The web resource. +*/ +WebResource.prototype.withQueryOptions = function (object) { + if (object) { + for (var i = 1; i < arguments.length; i++) { + if (object[arguments[i]]) { + this.withQueryOption(arguments[i], object[arguments[i]]); + } + } + } + + return this; +}; + +/** +* Adds an optional header parameter. +* +* @param {Object} name The name of the header parameter. +* @param {Object} value The value of the header parameter. +* @return {Object} The web resource. +*/ +WebResource.prototype.withHeader = function (name, value) { + if (!this.headers) { + this.headers = {}; + } + + if (!azureutil.IsNullOrEmptyOrUndefinedOrWhiteSpace(value)) { + value = value instanceof Date ? value.toUTCString() : value; + + this.headers[name] = value; + } + + return this; +}; + +/** +* Adds an optional body. +* +* @param {Object} body The request body. +* @return {Object} The web resource. +*/ +WebResource.prototype.withBody = function (body) { + this.body = body; + return this; +}; + +/** +* Adds optional query string parameters. +* +* Additional arguments will be the needles to search in the haystack. +* +* @param {Object} object The haystack of headers. +* @return {Object} The web resource. 
+*/ +WebResource.prototype.withHeaders = function (object) { + if (object) { + for (var i = 1; i < arguments.length; i++) { + if (object[arguments[i]]) { + this.withHeader(arguments[i], object[arguments[i]]); + } + } + } + + return this; +}; + +WebResource.prototype.addOptionalMetadataHeaders = function (metadata) { + var self = this; + + if (metadata) { + Object.keys(metadata).forEach(function (metadataKey) { + if (azureutil.IsNullOrEmptyOrUndefinedOrWhiteSpace(metadataKey)) { + throw new ArgumentError('metadata', SR.METADATA_KEY_INVALID); + } + + var value = metadata[metadataKey]; + if (azureutil.IsNullOrEmptyOrUndefinedOrWhiteSpace(value)) { + throw new ArgumentError('metadata', SR.METADATA_VALUE_INVALID); + } + + var metadataHeaderName = HeaderConstants.PREFIX_FOR_STORAGE_METADATA + metadataKey; + var existingMetadataHeaderName = ''; + var headers = self.headers ? self.headers : {}; + if (Object.keys(headers).some(function (headerName) { + existingMetadataHeaderName = headerName; + return headerName.toString().toLowerCase() === metadataHeaderName.toLowerCase(); + })) { + self.withHeader(existingMetadataHeaderName, self.headers[existingMetadataHeaderName] + ',' + value); + } else { + self.withHeader(metadataHeaderName, value); + } + }); + } + + return this; +}; + +/** +* Determines if a status code corresponds to a valid response according to the WebResource's expected status codes. +* +* @param {int} statusCode The response status code. +* @return true if the response is valid; false otherwise. +*/ +WebResource.validResponse = function (statusCode) { + if (statusCode >= 200 && statusCode < 300) { + return true; + } + + return false; +}; + +function isMethodWithBody(verb) { + return verb === HttpVerbs.PUT || + verb === HttpVerbs.POST || + verb === HttpVerbs.MERGE; +} + +/** +* Hook up the given input stream to a destination output stream if the WebResource method +* requires a request body and a body is not already set. 
+* +* @param {Stream} inputStream the stream to pipe from +* @param {Stream} outputStream the stream to pipe to +* +* @return destStream +*/ +WebResource.prototype.pipeInput = function(inputStream, destStream) { + if (isMethodWithBody(this.method) && !this.hasOwnProperty('body')) { + inputStream.pipe(destStream); + } + + return destStream; +}; + +module.exports = WebResource; + +/***/ }), + +/***/ 11007: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var crypto = __nccwpck_require__(6113); + +var Md5Wrapper = function () { +}; + +Md5Wrapper.prototype.createMd5Hash = function() { + return crypto.createHash('md5'); +}; + +module.exports = Md5Wrapper; + +/***/ }), + +/***/ 36426: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var Constants = __nccwpck_require__(60658); +var HeaderConstants = Constants.HeaderConstants; + +exports.parse = function (headers) { + var accountPropertiesResult = {}; + + if (headers[HeaderConstants.SKU_NAME]) { + accountPropertiesResult.SkuName = headers[HeaderConstants.SKU_NAME]; + } + + if (headers[HeaderConstants.ACCOUNT_KIND]) { + accountPropertiesResult.AccountKind = headers[HeaderConstants.ACCOUNT_KIND]; + } + + return accountPropertiesResult; +}; + +/***/ }), + +/***/ 6319: +/***/ ((module, exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var _ = __nccwpck_require__(7404); +var xmlbuilder = __nccwpck_require__(25514); + +var azureutil = __nccwpck_require__(3396); +var ISO8061Date = __nccwpck_require__(88439); +var Constants = __nccwpck_require__(60658); +var AclConstants = Constants.AclConstants; + +exports = module.exports; + +/** +* Builds an XML representation for container acl permissions. +* +* @param {Object.} entity The signed identifiers. +* @return {string} The XML container acl permissions. 
+*/ +exports.serialize = function (signedIdentifiersJs) { + var doc = xmlbuilder.create(AclConstants.SIGNED_IDENTIFIERS_ELEMENT, { version: '1.0', encoding: 'utf-8' }); + + var keys = Object.keys(signedIdentifiersJs); + if (keys.length > 0) { + keys.forEach(function (key) { + var accessPolicy = signedIdentifiersJs[key]; + doc = doc + .ele(AclConstants.SIGNED_IDENTIFIER_ELEMENT) + .ele(AclConstants.ID) + .txt(key) + .up() + .ele(AclConstants.ACCESS_POLICY); + + if (accessPolicy.Start) { + var startIsoString = accessPolicy.Start; + if (!_.isDate(startIsoString)) { + startIsoString = new Date(startIsoString); + } + + // Convert to expected ISO 8061 date format + startIsoString = ISO8061Date.format(startIsoString); + + doc = doc + .ele(AclConstants.START) + .txt(startIsoString) + .up(); + } + + if (accessPolicy.Expiry) { + var expiryIsoString = accessPolicy.Expiry; + if (!_.isDate(expiryIsoString)) { + expiryIsoString = new Date(expiryIsoString); + } + + // Convert to expected ISO 8061 date format + expiryIsoString = ISO8061Date.format(expiryIsoString); + + doc = doc + .ele(AclConstants.EXPIRY) + .txt(expiryIsoString) + .up(); + } + + if (accessPolicy.Permissions) { + doc = doc + .ele(AclConstants.PERMISSION) + .txt(accessPolicy.Permissions) + .up(); + } + + doc = doc.up().up(); + }); + } + return doc.doc().toString(); +}; + +exports.parse = function (signedIdentifiersXml) { + var signedIdentifiers = {}; + + signedIdentifiersXml = azureutil.tryGetValueChain(signedIdentifiersXml, [ 'SignedIdentifiers', 'SignedIdentifier' ]); + if (signedIdentifiersXml) { + if (!_.isArray(signedIdentifiersXml)) { + signedIdentifiersXml = [ signedIdentifiersXml ]; + } + + signedIdentifiersXml.forEach(function (signedIdentifier) { + var accessPolicy = {}; + if (signedIdentifier.AccessPolicy) { + if (signedIdentifier.AccessPolicy.Start) { + accessPolicy.Start = ISO8061Date.parse(signedIdentifier.AccessPolicy.Start); + } + + if (signedIdentifier.AccessPolicy.Expiry) { + accessPolicy.Expiry = 
ISO8061Date.parse(signedIdentifier.AccessPolicy.Expiry); + } + + if (signedIdentifier.AccessPolicy.Permission) { + accessPolicy.Permissions = signedIdentifier.AccessPolicy.Permission; + } + } + + signedIdentifiers[signedIdentifier.Id] = accessPolicy; + }); + } + + return signedIdentifiers; +}; + +/***/ }), + +/***/ 92434: +/***/ ((module, exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. 
+var _ = __nccwpck_require__(7404); +var xmlbuilder = __nccwpck_require__(25514); + +var Constants = __nccwpck_require__(60658); +var ServicePropertiesConstants = Constants.ServicePropertiesConstants; + +exports = module.exports; + +function serializeRetentionPolicy(doc, policy) { + if (policy !== null) { + if (typeof policy === 'undefined') { + policy = {}; + } + + doc = doc.ele(ServicePropertiesConstants.RETENTION_POLICY_ELEMENT); + if (typeof policy.Enabled !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) + .txt(policy.Enabled) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) + .txt(false) + .up(); + } + + if (typeof policy.Days !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.DAYS_ELEMENT) + .txt(policy.Days) + .up(); + } else if (policy.Enabled === true) { + doc = doc.ele(ServicePropertiesConstants.DAYS_ELEMENT) + .txt(1) + .up(); + } + + doc = doc.up(); + } +} + +function serializeDeleteRetentionPolicy(doc, policy) { + if (policy !== null) { + if (typeof policy === 'undefined') { + policy = {}; + } + + if (typeof policy.Enabled !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) + .txt(policy.Enabled) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) + .txt(false) + .up(); + } + + if (typeof policy.Days !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.DAYS_ELEMENT) + .txt(policy.Days) + .up(); + } else if (policy.Enabled === true) { + doc = doc.ele(ServicePropertiesConstants.DAYS_ELEMENT) + .txt(1) + .up(); + } + + doc = doc.up(); + } +} + +function serializeStaticWebsite(doc, staticWebsite) { + if (staticWebsite !== null) { + if (typeof staticWebsite === 'undefined') { + staticWebsite = {}; + } + + if (typeof staticWebsite.Enabled !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) + .txt(staticWebsite.Enabled) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) 
+ .txt(false) + .up(); + } + + if (typeof staticWebsite.IndexDocument !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.DEFAULT_INDEX_DOCUMENT_ELEMENT) + .txt(staticWebsite.IndexDocument) + .up(); + } + + if (typeof staticWebsite.ErrorDocument404Path !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.DEFAULT_ERROR_DOCUMENT_404_PATH_ELEMENT) + .txt(staticWebsite.ErrorDocument404Path) + .up(); + } + + doc = doc.up(); + } +} + +function serializeLogging(doc, logging) { + if (typeof logging.Version !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.VERSION_ELEMENT) + .txt(logging.Version) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.VERSION_ELEMENT) + .txt(ServicePropertiesConstants.DEFAULT_ANALYTICS_VERSION) + .up(); + } + + if (typeof logging.Delete !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.DELETE_ELEMENT) + .txt(logging.Delete) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.DELETE_ELEMENT) + .txt(false) + .up(); + } + + if (typeof logging.Read !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.READ_ELEMENT) + .txt(logging.Read) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.READ_ELEMENT) + .txt(false) + .up(); + } + + if (typeof logging.Write !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.WRITE_ELEMENT) + .txt(logging.Write) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.WRITE_ELEMENT) + .txt(false) + .up(); + } + + serializeRetentionPolicy(doc, logging.RetentionPolicy); + + doc = doc.up(); +} + +function serializeMetrics(doc, metrics) { + if (typeof metrics.Version !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.VERSION_ELEMENT) + .txt(metrics.Version) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.VERSION_ELEMENT) + .txt(ServicePropertiesConstants.DEFAULT_ANALYTICS_VERSION) + .up(); + } + + if (typeof metrics.Enabled !== 'undefined') { + doc = 
doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) + .txt(metrics.Enabled) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.ENABLED_ELEMENT) + .txt(false) + .up(); + } + + if (metrics.Enabled) { + if (typeof metrics.IncludeAPIs !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.INCLUDE_APIS_ELEMENT) + .txt(metrics.IncludeAPIs) + .up(); + } else if (metrics.Enabled === true) { + doc = doc.ele(ServicePropertiesConstants.INCLUDE_APIS_ELEMENT) + .txt(false) + .up(); + } + } + serializeRetentionPolicy(doc, metrics.RetentionPolicy); +} + +function serializeCorsRules(doc, rules) { + if (typeof rules !== 'undefined' && rules !== null && _.isArray(rules)) { + rules.forEach(function (rule) { + doc = doc.ele(ServicePropertiesConstants.CORS_RULE_ELEMENT); + + if (typeof rule.AllowedMethods !== 'undefined' && _.isArray(rule.AllowedMethods)) { + doc = doc.ele(ServicePropertiesConstants.ALLOWED_METHODS_ELEMENT) + .txt(rule.AllowedMethods.join(',')) + .up(); + } + + if (typeof rule.AllowedOrigins !== 'undefined' && _.isArray(rule.AllowedOrigins)) { + doc = doc.ele(ServicePropertiesConstants.ALLOWED_ORIGINS_ELEMENT) + .txt(rule.AllowedOrigins.join(',')) + .up(); + } + + if (typeof rule.AllowedHeaders !== 'undefined' && _.isArray(rule.AllowedHeaders)) { + doc = doc.ele(ServicePropertiesConstants.ALLOWED_HEADERS_ELEMENT) + .txt(rule.AllowedHeaders.join(',')) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.ALLOWED_HEADERS_ELEMENT) + .txt('') + .up(); + } + + if (typeof rule.ExposedHeaders !== 'undefined' && _.isArray(rule.ExposedHeaders)) { + doc = doc.ele(ServicePropertiesConstants.EXPOSED_HEADERS_ELEMENT) + .txt(rule.ExposedHeaders.join(',')) + .up(); + } else { + doc = doc.ele(ServicePropertiesConstants.EXPOSED_HEADERS_ELEMENT) + .txt('') + .up(); + } + + if (typeof rule.MaxAgeInSeconds !== 'undefined') { + doc = doc.ele(ServicePropertiesConstants.MAX_AGE_IN_SECONDS_ELEMENT) + .txt(rule.MaxAgeInSeconds) + .up(); + } else { + doc = 
doc.ele(ServicePropertiesConstants.MAX_AGE_IN_SECONDS_ELEMENT) + .txt('0') + .up(); + } + + doc = doc.up(); + }); + } +} + +exports.serialize = function (servicePropertiesJs) { + var doc = xmlbuilder.create(ServicePropertiesConstants.STORAGE_SERVICE_PROPERTIES_ELEMENT, { version: '1.0', encoding: 'utf-8' }); + + if (servicePropertiesJs.Logging) { + doc = doc.ele(ServicePropertiesConstants.LOGGING_ELEMENT); + serializeLogging(doc, servicePropertiesJs.Logging); + doc = doc.up(); + } + + if (servicePropertiesJs.HourMetrics) { + doc = doc.ele(ServicePropertiesConstants.HOUR_METRICS_ELEMENT); + serializeMetrics(doc, servicePropertiesJs.HourMetrics); + doc = doc.up(); + } + + if (servicePropertiesJs.MinuteMetrics) { + doc = doc.ele(ServicePropertiesConstants.MINUTE_METRICS_ELEMENT); + serializeMetrics(doc, servicePropertiesJs.MinuteMetrics); + doc = doc.up(); + } + + if (servicePropertiesJs.Cors) { + doc = doc.ele(ServicePropertiesConstants.CORS_ELEMENT); + serializeCorsRules(doc, servicePropertiesJs.Cors.CorsRule); + doc = doc.up(); + } + + if (servicePropertiesJs.DefaultServiceVersion) { + doc = doc.ele(ServicePropertiesConstants.DEFAULT_SERVICE_VERSION_ELEMENT) + .txt(servicePropertiesJs.DefaultServiceVersion) + .up(); + } + + if (servicePropertiesJs.DeleteRetentionPolicy) { + doc = doc.ele(ServicePropertiesConstants.DEFAULT_DELETE_RETENTION_POLICY_ELEMENT); + serializeDeleteRetentionPolicy(doc, servicePropertiesJs.DeleteRetentionPolicy); + doc = doc.up(); + } + + if (servicePropertiesJs.StaticWebsite) { + doc = doc.ele(ServicePropertiesConstants.DEFAULT_STATIC_WEBSITE_ELEMENT); + serializeStaticWebsite(doc, servicePropertiesJs.StaticWebsite); + doc = doc.up(); + } + + return doc.doc().toString(); +}; + +function parseRetentionPolicy(policyXml) { + var policy = {}; + + if (typeof policyXml.Enabled !== 'undefined') { + policy.Enabled = policyXml.Enabled === 'true'; + } + + if (typeof policyXml.Days !== 'undefined') { + policy.Days = parseInt(policyXml.Days, 10); + } + 
+ return policy; +} + +function parseLogging(loggingXml) { + var logging = {}; + + if (typeof loggingXml.Version !== 'undefined') { + logging.Version = loggingXml.Version; + } + + if (typeof loggingXml.Delete !== 'undefined') { + logging.Delete = loggingXml.Delete === 'true'; + } + + if (typeof loggingXml.Read !== 'undefined') { + logging.Read = loggingXml.Read === 'true'; + } + + if (typeof loggingXml.Write !== 'undefined') { + logging.Write = loggingXml.Write === 'true'; + } + + if (typeof loggingXml.RetentionPolicy !== 'undefined') { + logging.RetentionPolicy = parseRetentionPolicy(loggingXml.RetentionPolicy); + } + + return logging; +} + +function parseMetrics(metricsXml) { + var metrics = {}; + + if (typeof metricsXml.Version !== 'undefined') { + metrics.Version = metricsXml.Version; + } + + if (typeof metricsXml.Enabled !== 'undefined') { + metrics.Enabled = metricsXml.Enabled === 'true'; + } + + if (typeof metricsXml.IncludeAPIs !== 'undefined') { + metrics.IncludeAPIs = metricsXml.IncludeAPIs === 'true'; + } + + if (typeof metricsXml.RetentionPolicy !== 'undefined') { + metrics.RetentionPolicy = parseRetentionPolicy(metricsXml.RetentionPolicy); + } + + return metrics; +} + +function parseCors(corsXml) { + var cors = {}; + + if (typeof corsXml.CorsRule !== 'undefined') { + var rulesXml = corsXml.CorsRule; + if (!_.isArray(rulesXml)) { + rulesXml = [rulesXml]; + } + + cors.CorsRule = []; + rulesXml.forEach(function (ruleXml) { + var rule = {}; + + if (typeof ruleXml.AllowedMethods !== 'undefined') { + if (ruleXml.AllowedMethods !== '') { + rule.AllowedMethods = ruleXml.AllowedMethods.split(','); + } + else { + rule.AllowedMethods = []; + } + } + + if (typeof ruleXml.AllowedOrigins !== 'undefined') { + if (ruleXml.AllowedOrigins !== '') { + rule.AllowedOrigins = ruleXml.AllowedOrigins.split(','); + } + else { + rule.AllowedOrigins = []; + } + } + + if (typeof ruleXml.AllowedHeaders !== 'undefined') { + if (ruleXml.AllowedHeaders !== '') { + rule.AllowedHeaders 
= ruleXml.AllowedHeaders.split(','); + } + else { + rule.AllowedHeaders = []; + } + } + + if (typeof ruleXml.ExposedHeaders !== 'undefined') { + if (ruleXml.ExposedHeaders !== '') { + rule.ExposedHeaders = ruleXml.ExposedHeaders.split(','); + } + else { + rule.ExposedHeaders = []; + } + } + + if (typeof ruleXml.MaxAgeInSeconds !== 'undefined') { + rule.MaxAgeInSeconds = parseInt(ruleXml.MaxAgeInSeconds, 10); + } + + cors.CorsRule.push(rule); + }); + } + + return cors; +} + +function parseDeleteRetentionPolicy(deleteRetentionPolicyXml) { + var deleteRetentionPolicy = {}; + + if (typeof deleteRetentionPolicyXml.Enabled !== 'undefined') { + deleteRetentionPolicy.Enabled = deleteRetentionPolicyXml.Enabled === 'true'; + } + + if (typeof deleteRetentionPolicyXml.Days !== 'undefined') { + deleteRetentionPolicy.Days = parseInt(deleteRetentionPolicyXml.Days); + } + + return deleteRetentionPolicy; +} + +function parseStaticWebsite(staticWebsiteXml) { + var staticWebsite = {}; + + if (typeof staticWebsiteXml.Enabled !== 'undefined') { + staticWebsite.Enabled = staticWebsiteXml.Enabled === 'true'; + } + + if (typeof staticWebsiteXml.IndexDocument !== 'undefined') { + staticWebsite.IndexDocument = staticWebsiteXml.IndexDocument; + } + + if (typeof staticWebsiteXml.ErrorDocument404Path !== 'undefined') { + staticWebsite.ErrorDocument404Path = staticWebsiteXml.ErrorDocument404Path; + } + + return staticWebsite; +} + +exports.parse = function (servicePropertiesXml) { + var serviceProperties = {}; + + if (typeof servicePropertiesXml.Logging !== 'undefined') { + serviceProperties.Logging = parseLogging(servicePropertiesXml.Logging); + } + + if (typeof servicePropertiesXml.HourMetrics !== 'undefined') { + serviceProperties.HourMetrics = parseMetrics(servicePropertiesXml.HourMetrics); + } + + if (typeof servicePropertiesXml.MinuteMetrics !== 'undefined') { + serviceProperties.MinuteMetrics = parseMetrics(servicePropertiesXml.MinuteMetrics); + } + + if (typeof servicePropertiesXml.Cors 
!== 'undefined') { + serviceProperties.Cors = parseCors(servicePropertiesXml.Cors); + } + + if (typeof servicePropertiesXml.DefaultServiceVersion !== 'undefined') { + serviceProperties.DefaultServiceVersion = servicePropertiesXml.DefaultServiceVersion; + } + + if (typeof servicePropertiesXml.DeleteRetentionPolicy !== 'undefined') { + serviceProperties.DeleteRetentionPolicy = parseDeleteRetentionPolicy(servicePropertiesXml.DeleteRetentionPolicy); + } + + if (typeof servicePropertiesXml.StaticWebsite !== 'undefined') { + serviceProperties.StaticWebsite = parseStaticWebsite(servicePropertiesXml.StaticWebsite); + } + + return serviceProperties; +}; + +/***/ }), + +/***/ 54525: +/***/ ((module, exports) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +exports = module.exports; + +exports.parse = function (serviceStatsXml) { + var serviceStats = {}; + + if (typeof serviceStatsXml.GeoReplication !== 'undefined') { + serviceStats.GeoReplication = {}; + + if (typeof serviceStatsXml.GeoReplication.Status !== 'undefined') { + serviceStats.GeoReplication.Status = serviceStatsXml.GeoReplication.Status; + } + + if (typeof serviceStatsXml.GeoReplication.LastSyncTime !== 'undefined' && serviceStatsXml.GeoReplication.LastSyncTime !== '') { + serviceStats.GeoReplication.LastSyncTime = new Date(serviceStatsXml.GeoReplication.LastSyncTime); + } + } + + return serviceStats; +}; + +/***/ }), + +/***/ 39649: +/***/ ((module) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/** +* Creates a new TokenCredential object. +* @class +* The TokenCredential class is used to store the access token string. +* +* @constructor +* @param {string} token The access token, such as an OAuth access token in string type. +* +* @example +* var azure = require('azure-storage'); +* var tokenCredential = new azure.TokenCredential('myOAuthAccessToken'); +* var blobService = azure.createBlobServiceWithTokenCredential('https://account.blob.core.windows.net', tokenCredential); +* tokenCredential.set('updatedOAuthAccessToken'); +*/ +function TokenCredential (token) { + this.token = token; +} + +/** +* Get current access token. 
+* +* @return {string} The current access token in string type. +*/ +TokenCredential.prototype.get = function () { + return this.token; +}; + +/** +* Renew the access token. +* +* @param {string} token The new access token in string. +*/ +TokenCredential.prototype.set = function (token) { + this.token = token; +}; + +module.exports = TokenCredential; + +/***/ }), + +/***/ 14304: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +module.exports = __nccwpck_require__(29809); + +/***/ }), + +/***/ 18876: +/***/ ((module, exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +var util = __nccwpck_require__(73837); + +var azureUtil = __nccwpck_require__(3396); +var errors = __nccwpck_require__(12528); +var SR = __nccwpck_require__(13497); +var Constants = __nccwpck_require__(60658); + +exports = module.exports; + +/** +* The default protocol. +*/ +exports.DEFAULT_PROTOCOL = Constants.HTTPS; + +var NoMatchError = function (msg, constr) { + errors.captureStackTrace(this, constr || this); + this.message = msg || 'Error'; +}; + +util.inherits(NoMatchError, Error); +NoMatchError.prototype.name = 'NoMatchError'; + +exports.NoMatchError = NoMatchError; + +/** +* Throws an exception if the connection string format does not match any of the +* available formats. +* +* @param {string} connectionString The invalid formatted connection string. +* @return none +*/ +exports.noMatchConnectionString = function (connectionString) { + throw new NoMatchError('The provided connection string "' + connectionString + '" does not have complete configuration settings.'); +}; + +/** +* Throws an exception if the settings dont match any of the +* available formats. +* +* @param {object} settings The invalid settings. +* @return none +*/ +exports.noMatchSettings = function (settings) { + throw new NoMatchError('The provided settings ' + JSON.stringify(settings) + ' are not complete.'); +}; + +/** +* Parses the connection string and then validate that the parsed keys belong to +* the validSettingKeys +* +* @param {string} connectionString The user provided connection string. +* @param {array} validKeys The valid keys. +* @return {array} The tokenized connection string keys. 
+*/ +exports.parseAndValidateKeys = function (connectionString, validKeys) { + var parsedConnectionString = { }; + + // parse key/value pairs from connection string + var pairs = connectionString.split(';'); + for (var m = 0; m < pairs.length; m++) { + if (pairs[m].length === 0) { + continue; + } + + var equalDex = pairs[m].indexOf('='); + if (equalDex < 0) { + throw new SyntaxError(SR.INVALID_CONNECTION_STRING); + } else if (equalDex === 0) { + // empty key name. + throw new SyntaxError(SR.INVALID_CONNECTION_STRING_EMPTY_KEY); + } + + var key = pairs[m].substring(0, equalDex); + + // assure that all given keys are valid. + if (!azureUtil.inArrayInsensitive(key, validKeys)) { + throw new SyntaxError(util.format(SR.INVALID_CONNECTION_STRING_BAD_KEY, key)); + } + + var value = pairs[m].substring(equalDex + 1); + + if(typeof parsedConnectionString[key] === 'undefined'){ + parsedConnectionString[key] = value; + } else { + // duplicate key name + throw new SyntaxError(util.format(SR.INVALID_CONNECTION_STRING_DUPLICATE_KEY, key)); + } + } + + return parsedConnectionString; +}; + +/** +* Creates an anonymous function that acts as predicate to perform a validation. +* +* @param array {requirements} The array of conditions to satisfy. +* @param boolean {isRequired} Either these conditions are all required or all +* optional. +* @param boolean {atLeastOne} Indicates that at least one requirement must +* succeed. +* @return {function} +*/ +exports.getValidator = function (requirements, isRequired, atLeastOne) { + return function (userSettings) { + var oneFound = false; + var result = { }; + + for (var key in userSettings) { + if (userSettings.hasOwnProperty(key)) { + result[key.toLowerCase()] = userSettings[key]; + } + } + + for (var requirement in requirements) { + if (requirements.hasOwnProperty(requirement)) { + var settingName = requirements[requirement].SettingName.toLowerCase(); + + // Check if the setting name exists in the provided user settings. 
+ if (result[settingName]) { + // Check if the provided user setting value is valid. + var validationFunc = requirements[requirement].SettingConstraint; + var isValid = validationFunc(result[settingName]); + + if (isValid) { + // Remove the setting as indicator for successful validation. + delete result[settingName]; + oneFound = true; + } + } else if (isRequired) { + // If required then fail because the setting does not exist + return null; + } + } + } + + if (atLeastOne) { + // At least one requirement must succeed, otherwise fail. + return oneFound ? result : null; + } else { + return result; + } + }; +}; + +/** +* Creates a setting value condition that validates it is one of the +* passed valid values. +* +* @param {string} name The setting key name. +* @return {array} +*/ +exports.setting = function (name) { + var validValues = Array.prototype.slice.call(arguments, 1, arguments.length); + + var predicate = function (settingValue) { + var validValuesString = JSON.stringify(validValues); + if (validValues.length === 0) { + // No restrictions, succeed. + return true; + } + + // Check to find if the settingValue is valid or not. + for (var index = 0; index < validValues.length; index++) { + if (settingValue.toString() == validValues[index].toString()) { + // SettingValue is found in valid values set, succeed. + return true; + } + } + + // settingValue is missing in valid values set, fail. + throw new RangeError('The provided config value ' + settingValue + ' does not belong to the valid values subset:\n' + validValuesString); + }; + + return exports.settingWithFunc(name, predicate); +}; + +/** +* Creates an "at lease one" predicate for the provided list of requirements. +* +* @return callable +*/ +exports.atLeastOne = function () { + var allSettings = arguments; + return exports.getValidator(allSettings, false, true); +}; + +/** +* Creates an optional predicate for the provided list of requirements. 
+* +* @return {function} +*/ +exports.optional = function () { + var optionalSettings = arguments; + return exports.getValidator(optionalSettings, false, false); +}; + +/** +* Creates an required predicate for the provided list of requirements. +* +* @return {function} +*/ +exports.allRequired = function () { + var requiredSettings = arguments; + return exports.getValidator(requiredSettings, true, false); +}; + +/** +* Creates a setting value condition using the passed predicate. +* +* @param {string} name The setting key name. +* @param {function} predicate The setting value predicate. +* @return {array} +*/ +exports.settingWithFunc = function (name, predicate) { + var requirement = {}; + requirement.SettingName = name; + requirement.SettingConstraint = predicate; + + return requirement; +}; + + +/** +* Tests to see if a given list of settings matches a set of filters exactly. +* +* @param array $settings The settings to check. +* @return boolean If any filter returns null, false. If there are any settings +* left over after all filters are processed, false. Otherwise true. +*/ +exports.matchedSpecification = function (settings) { + var constraints = Array.prototype.slice.call(arguments, 1, arguments.length); + + for (var constraint in constraints) { + if (constraints.hasOwnProperty(constraint)) { + var remainingSettings = constraints[constraint](settings); + + if (!remainingSettings) { + return false; + } else { + settings = remainingSettings; + } + } + } + + return azureUtil.objectKeysLength(settings) === 0; +}; + +/***/ }), + +/***/ 22275: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var request = __nccwpck_require__(14304); +var url = __nccwpck_require__(57310); +var qs = __nccwpck_require__(63477); +var util = __nccwpck_require__(73837); +var xml2js = __nccwpck_require__(37977); +var events = __nccwpck_require__(82361); +var _ = __nccwpck_require__(7404); +var guid = __nccwpck_require__(48073); +var os = __nccwpck_require__(22037); +var extend = __nccwpck_require__(99237); +var Parser = __nccwpck_require__(40645); + +var Md5Wrapper = __nccwpck_require__(11007); +var azureutil = __nccwpck_require__(3396); +var validate = __nccwpck_require__(51046); +var SR = __nccwpck_require__(13497); +var WebResource = __nccwpck_require__(72730); +var BufferStream = __nccwpck_require__(63615); + +var ServiceSettings = __nccwpck_require__(18876); +var StorageServiceSettings = __nccwpck_require__(40308); +var Constants = __nccwpck_require__(60658); +var StorageUtilities = __nccwpck_require__(68003); +var ServicePropertiesResult = __nccwpck_require__(92434); +var TableUtilities = __nccwpck_require__(581); + +var SharedKey = __nccwpck_require__(26130); +var SharedAccessSignature = __nccwpck_require__(68327); +var TokenSigner = __nccwpck_require__(7266); + +var HeaderConstants = Constants.HeaderConstants; +var QueryStringConstants = Constants.QueryStringConstants; +var HttpResponseCodes = Constants.HttpConstants.HttpResponseCodes; +var StorageServiceClientConstants = Constants.StorageServiceClientConstants; +var defaultRequestLocationMode = Constants.RequestLocationMode.PRIMARY_ONLY; +var RequestLocationMode = 
Constants.RequestLocationMode; + +var Logger = __nccwpck_require__(98256); +var errors = __nccwpck_require__(12528); +var ArgumentError = errors.ArgumentError; +var ArgumentNullError = errors.ArgumentNullError; +var TimeoutError = errors.TimeoutError; +var StorageError = errors.StorageError; + +/** +* Creates a new StorageServiceClient object. +* +* @class +* The StorageServiceClient class is the base class of all the service classes. +* @constructor +* @param {string} storageAccount The storage account. +* @param {string} storageAccessKey The storage access key. +* @param {object} host The host for the service. +* @param {bool} usePathStyleUri Boolean value indicating wether to use path style uris. +* @param {string} sas The Shared Access Signature string. +* @param {TokenCredential} [token] The {@link TokenCredential} object, which can be created with an OAuth access token string. +*/ +function StorageServiceClient(storageAccount, storageAccessKey, host, usePathStyleUri, sas, token) { + StorageServiceClient['super_'].call(this); + + if(storageAccount && storageAccessKey) { + // account and key + this.storageAccount = storageAccount; + this.storageAccessKey = storageAccessKey; + this.storageCredentials = new SharedKey(this.storageAccount, this.storageAccessKey, usePathStyleUri); + } else if (sas) { + // sas + this.sasToken = sas; + this.storageCredentials = new SharedAccessSignature(sas); + } else if (token) { + // access token + this.token = token; + this.storageCredentials = new TokenSigner(token); + } else { + // anonymous + this.anonymous = true; + this.storageCredentials = { + signRequest: function(webResource, callback){ + // no op, anonymous access + callback(null); + } + }; + } + + if(host){ + this.setHost(host); + } + + this.apiVersion = HeaderConstants.TARGET_STORAGE_VERSION; + this.usePathStyleUri = usePathStyleUri; + + this._initDefaultFilter(); + + /** + * The logger of the service. 
To change the log level of the services, set the `[logger.level]{@link Logger#level}`. + * @name StorageServiceClient#logger + * @type Logger + * */ + this.logger = new Logger(Logger.LogLevels.INFO); + + this._setDefaultProxy(); + + this.xml2jsSettings = StorageServiceClient._getDefaultXml2jsSettings(); + this.defaultLocationMode = StorageUtilities.LocationMode.PRIMARY_ONLY; +} + +util.inherits(StorageServiceClient, events.EventEmitter); + +/** +* Gets the default xml2js settings. +* @ignore +* @return {object} The default settings +*/ +StorageServiceClient._getDefaultXml2jsSettings = function() { + var xml2jsSettings = _.clone(xml2js.defaults['0.2']); + + // these determine what happens if the xml contains attributes + xml2jsSettings.attrkey = Constants.TableConstants.XML_METADATA_MARKER; + xml2jsSettings.charkey = Constants.TableConstants.XML_VALUE_MARKER; + + // from xml2js guide: always put child nodes in an array if true; otherwise an array is created only if there is more than one. + xml2jsSettings.explicitArray = false; + + return xml2jsSettings; +}; + +/** +* Sets a host for the service. +* @ignore +* @param {string} host The host for the service. 
+*/ +StorageServiceClient.prototype.setHost = function (host) { + var parseHost = function(hostUri){ + var parsedHost; + if(!azureutil.objectIsNull(hostUri)) { + if(hostUri.indexOf('http') === -1 && hostUri.indexOf('//') !== 0){ + hostUri = '//' + hostUri; + } + parsedHost = url.parse(hostUri, false, true); + + if(!parsedHost.protocol){ + parsedHost.protocol = ServiceSettings.DEFAULT_PROTOCOL; + } + + if (!parsedHost.port) { + if (parsedHost.protocol === Constants.HTTPS) { + parsedHost.port = Constants.DEFAULT_HTTPS_PORT; + } else { + parsedHost.port = Constants.DEFAULT_HTTP_PORT; + } + } + + parsedHost = url.format({ + protocol: parsedHost.protocol, + port: parsedHost.port, + hostname: parsedHost.hostname, + pathname: parsedHost.pathname + }); + } + + return parsedHost; + }; + + validate.isValidHost(host); + + this.host = { + primaryHost: parseHost(host.primaryHost), + secondaryHost: parseHost(host.secondaryHost) + }; +}; + +/** +* Performs a REST service request through HTTP expecting an input stream. +* @ignore +* +* @param {WebResource} webResource The webresource on which to perform the request. +* @param {string} outputData The outgoing request data as a raw string. +* @param {object} [options] The request options. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {function} callback The response callback function. +*/ +StorageServiceClient.prototype.performRequest = function (webResource, outputData, options, callback) { + this._performRequest(webResource, { outputData: outputData }, options, callback); +}; + +/** +* Performs a REST service request through HTTP expecting an input stream. +* @ignore +* +* @param {WebResource} webResource The webresource on which to perform the request. +* @param {Stream} outputStream The outgoing request data as a stream. 
+* @param {object} [options] The request options. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {function} callback The response callback function. +*/ +StorageServiceClient.prototype.performRequestOutputStream = function (webResource, outputStream, options, callback) { + this._performRequest(webResource, { outputStream: outputStream }, options, callback); +}; + +/** +* Performs a REST service request through HTTP expecting an input stream. +* @ignore +* +* @param {WebResource} webResource The webresource on which to perform the request. +* @param {string} outputData The outgoing request data as a raw string. +* @param {Stream} inputStream The ingoing response data as a stream. +* @param {object} [options] The request options. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {function} callback The response callback function. +*/ +StorageServiceClient.prototype.performRequestInputStream = function (webResource, outputData, inputStream, options, callback) { + this._performRequest(webResource, { outputData: outputData, inputStream: inputStream }, options, callback); +}; + +/** +* Performs a REST service request through HTTP. +* @ignore +* +* @param {WebResource} webResource The webresource on which to perform the request. +* @param {object} body The request body. +* @param {string} [body.outputData] The outgoing request data as a raw string. +* @param {Stream} [body.outputStream] The outgoing request data as a stream. +* @param {Stream} [body.inputStream] The ingoing response data as a stream. +* @param {object} [options] The request options. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {function} callback The response callback function. +*/ +StorageServiceClient.prototype._performRequest = function (webResource, body, options, callback) { + var self = this; + + // Sets a requestId on the webResource + if(!options.clientRequestId) { + options.clientRequestId = guid.v1(); + } + + webResource.withHeader(HeaderConstants.CLIENT_REQUEST_ID, options.clientRequestId); + + // Sets the user-agent string if the process is not started by the browser + if(!process.browser) { + var userAgentComment = util.format('(NODE-VERSION %s; %s %s)', process.version, os.type(), os.release()); + webResource.withHeader(HeaderConstants.USER_AGENT, Constants.USER_AGENT_PRODUCT_NAME + '/' + Constants.USER_AGENT_PRODUCT_VERSION + ' ' + userAgentComment); + } + + // Initialize the location that the request is going to be sent to. + if(azureutil.objectIsNull(options.locationMode)) { + options.locationMode = this.defaultLocationMode; + } + + // Initialize the location that the request can be sent to. + if(azureutil.objectIsNull(options.requestLocationMode)) { + options.requestLocationMode = defaultRequestLocationMode; + } + + // Initialize whether nagling is used or not. + if(azureutil.objectIsNull(options.useNagleAlgorithm)) { + options.useNagleAlgorithm = this.useNagleAlgorithm; + } + + this._initializeLocation(options); + + // Initialize the operationExpiryTime + this._setOperationExpiryTime(options); + + // If the output stream already got sent to server and got error back, + // we should NOT retry within the SDK as the stream data is not valid anymore if we retry directly. 
+ // And it's very hard for SDK to re-wind the stream. + // + // If users want to retry on this kind of error, they can implement their own logic to parse the response and + // determine if they need to re-prepare a stream and call our SDK API to retry. + // + // Currently for blobs/files with size greater than 32MB (DEFAULT_SINGLE_BLOB_PUT_THRESHOLD_IN_BYTES), + // we'll send the steam by chunk buffers which doesn't have this issue. + var outputStreamSent = false; + + var operation = function (options, next) { + self._validateLocation(options); + var currentLocation = options.currentLocation; + self._buildRequestOptions(webResource, body, options, function (err, finalRequestOptions) { + if (err) { + callback({ error: err, response: null }, function (finalRequestOptions, finalCallback) { + finalCallback(finalRequestOptions); + }); + } else { + self.logger.log(Logger.LogLevels.DEBUG, 'FINAL REQUEST OPTIONS:\n' + util.inspect(finalRequestOptions)); + + if(self._maximumExecutionTimeExceeded(Date.now(), options.operationExpiryTime)) { + callback({ error: new TimeoutError(SR.MAXIMUM_EXECUTION_TIMEOUT_EXCEPTION), response: null }, function (finalRequestOptions, finalCallback) { + finalCallback(finalRequestOptions); + }); + } else { + var processResponseCallback = function (error, response) { + var responseObject; + + if (error) { + responseObject = { error: error, response: null }; + } else { + responseObject = self._processResponse(webResource, response, options); + responseObject.contentMD5 = response.contentMD5; + responseObject.length = response.length; + } + + responseObject.operationEndTime = new Date(); + // Required for listing operations to make sure successive operations go to the same location. 
+ responseObject.targetLocation = currentLocation; + responseObject.outputStreamSent = outputStreamSent; + + callback(responseObject, next); + }; + + var endResponse; + var buildRequest = function (headersOnly, inputStream) { + // Build request (if body was set before, request will process immediately, if not it'll wait for the piping to happen + var requestStream; + + var requestWithDefaults; + + if(self.proxy) { + if(requestWithDefaults === undefined) { + requestWithDefaults = request.defaults({'proxy':self.proxy}); + } + } else { + requestWithDefaults = request; + } + + if (headersOnly) { + requestStream = requestWithDefaults(finalRequestOptions); + + requestStream.on('error', processResponseCallback); + requestStream.on('response', function (response) { + var isValid = WebResource.validResponse(response.statusCode); + if (!isValid) { + // When getting invalid response, try to get the error message for future steps to extract the detailed error information + var contentLength = parseInt(response.headers['content-length']); + var errorMessageBuffer; + var index = 0; + if (contentLength !== undefined) { + errorMessageBuffer = Buffer.alloc(contentLength); + } + + requestStream.on('data', function (data) { + if (contentLength !== undefined) { + data.copy(errorMessageBuffer, index); + index += data.length; + } else { + if (!errorMessageBuffer) { + errorMessageBuffer = data; + } else { + errorMessageBuffer = Buffer.concat([errorMessageBuffer, data]); + } + } + }); + requestStream.on('end', function () { + if (errorMessageBuffer) { + // Strip the UTF8 BOM following the same ways as 'request' module + if (errorMessageBuffer.length > 3 && + errorMessageBuffer[0] === 239 && + errorMessageBuffer[1] === 187 && + errorMessageBuffer[2] === 191) { + response.body = errorMessageBuffer.toString('utf8', 3); + } else { + response.body = errorMessageBuffer.toString('utf8'); + } + } + processResponseCallback(null, response); + }); + } else { + // Only pipe to the destination stream 
when we get a valid response from service + // Error message should NOT be piped to the destination stream + if (inputStream) { + requestStream.pipe(inputStream); + } + + var responseLength = 0; + var internalHash = new Md5Wrapper().createMd5Hash(); + response.on('data', function(data) { + responseLength += data.length; + internalHash.update(data); + }); + + response.on('end', function () { + // Calculate and set MD5 here + if(azureutil.objectIsNull(options.disableContentMD5Validation) || options.disableContentMD5Validation === false) { + response.contentMD5 = internalHash.digest('base64'); + } + + response.length = responseLength; + endResponse = response; + }); + } + }); + } else { + requestStream = requestWithDefaults(finalRequestOptions, processResponseCallback); + } + + //If useNagleAlgorithm is not set or the value is set and is false, setNoDelay is set to true. + if (azureutil.objectIsNull(options.useNagleAlgorithm) || options.useNagleAlgorithm === false) { + requestStream.on('request', function(httpRequest) { + httpRequest.setNoDelay(true); + }); + } + + // Workaround to avoid request from potentially setting unwanted (rejected) headers by the service + var oldEnd = requestStream.end; + requestStream.end = function () { + if (finalRequestOptions.headers['content-length']) { + requestStream.headers['content-length'] = finalRequestOptions.headers['content-length']; + } else if (requestStream.headers['content-length']) { + delete requestStream.headers['content-length']; + } + + oldEnd.call(requestStream); + }; + + // Bubble events up -- This is when the request is going to be made. 
+ requestStream.on('response', function (response) { + self.emit('receivedResponseEvent', response); + }); + + return requestStream; + }; + + if (body && body.outputData) { + if (!azureutil.isBrowser() && Buffer.isBuffer(body.outputData)) { + // Request module will take 200MB additional memory when we pass a 100MB buffer as body + // Transfer buffer to stream will highly reduce the memory used by request module + finalRequestOptions.body = new BufferStream(body.outputData); + } else { + finalRequestOptions.body = body.outputData; + } + } + + // Pipe any input / output streams + if (body && body.inputStream) { + body.inputStream.on('close', function () { + if (endResponse) { + processResponseCallback(null, endResponse); + endResponse = null; + } + }); + body.inputStream.on('end', function () { + if (endResponse) { + processResponseCallback(null, endResponse); + endResponse = null; + } + }); + body.inputStream.on('finish', function () { + if (endResponse) { + processResponseCallback(null, endResponse); + endResponse = null; + } + }); + buildRequest(true, body.inputStream); + } else if (body && body.outputStream) { + var sendUnchunked = function () { + var size = finalRequestOptions.headers['content-length'] ? 
+ finalRequestOptions.headers['content-length'] : + Constants.BlobConstants.MAX_SINGLE_UPLOAD_BLOB_SIZE_IN_BYTES; + + var concatBuf = Buffer.alloc(parseInt(size)); + var index = 0; + + body.outputStream.on('data', function (d) { + outputStreamSent = true; + if(self._maximumExecutionTimeExceeded(Date.now(), options.operationExpiryTime)) { + processResponseCallback(new TimeoutError(SR.MAXIMUM_EXECUTION_TIMEOUT_EXCEPTION)); + } else { + d.copy(concatBuf, index); + index += d.length; + } + }).on('end', function () { + var requestStream = buildRequest(); + requestStream.write(concatBuf); + requestStream.end(); + }); + if (azureutil.isStreamPaused(body.outputStream)) { + body.outputStream.resume(); + } + }; + + var sendStream = function () { + // NOTE: workaround for an unexpected EPIPE exception when piping streams larger than 29 MB + if (!azureutil.objectIsNull(finalRequestOptions.headers['content-length']) && finalRequestOptions.headers['content-length'] < 29 * 1024 * 1024) { + body.outputStream.pipe(buildRequest()); + outputStreamSent = true; + + if (azureutil.isStreamPaused(body.outputStream)) { + body.outputStream.resume(); + } + } else { + sendUnchunked(); + } + }; + + if (!body.outputStream.readable) { + // if the content length is zero, build the request and don't send a body + if (finalRequestOptions.headers['content-length'] === 0) { + buildRequest(); + } else { + // otherwise, wait until we know the readable stream is actually valid before piping + body.outputStream.on('open', function () { + sendStream(); + }); + } + } else { + sendStream(); + } + + // This catches any errors that happen while creating the readable stream (usually invalid names) + body.outputStream.on('error', function (error) { + processResponseCallback(error); + }); + } else { + buildRequest(); + } + } + } + }); + }; + + // The filter will do what it needs to the requestOptions and will provide a + // function to be handled after the reply + self.filter(options, function 
(postFiltersRequestOptions, nextPostCallback) { + if(self._maximumExecutionTimeExceeded(Date.now() + postFiltersRequestOptions.retryInterval, postFiltersRequestOptions.operationExpiryTime)) { + callback({ error: new TimeoutError(SR.MAXIMUM_EXECUTION_TIMEOUT_EXCEPTION), response: null}, function (postFiltersRequestOptions, finalCallback) { + finalCallback(postFiltersRequestOptions); + }); + } else { + // If there is a filter, flow is: + // filter -> operation -> process response + if(postFiltersRequestOptions.retryContext) { + var func = function() { + postFiltersRequestOptions.retryInterval = 0; + operation(postFiltersRequestOptions, nextPostCallback); + }; + + // Sleep for retryInterval before making the request + setTimeout(func, postFiltersRequestOptions.retryInterval); + } else { + // No retry policy filter specified + operation(postFiltersRequestOptions, nextPostCallback); + } + } + }); +}; + + +/** +* Builds the request options to be passed to the http.request method. +* @ignore +* @param {WebResource} webResource The webresource where to build the options from. +* @param {object} options The request options. +* @param {function(error, requestOptions)} callback The callback function. +*/ +StorageServiceClient.prototype._buildRequestOptions = function (webResource, body, options, callback) { + webResource.withHeader(HeaderConstants.STORAGE_VERSION, this.apiVersion); + webResource.withHeader(HeaderConstants.MS_DATE, new Date().toUTCString()); + if (!webResource.headers[HeaderConstants.ACCEPT]) { + webResource.withHeader(HeaderConstants.ACCEPT, 'application/atom+xml,application/xml'); + } + webResource.withHeader(HeaderConstants.ACCEPT_CHARSET, 'UTF-8'); + + // Browsers cache GET/HEAD requests by adding conditional headers such as 'IF_MODIFIED_SINCE' after Azure Storage 'Authorization header' calculation, + // which may result in a 403 authorization error. So add timestamp to GET/HEAD request URLs thus avoid the browser cache. 
+ if (azureutil.isBrowser() && ( + webResource.method === Constants.HttpConstants.HttpVerbs.GET || + webResource.method === Constants.HttpConstants.HttpVerbs.HEAD)) { + webResource.withQueryOption(HeaderConstants.FORCE_NO_CACHE_IN_BROWSER, new Date().getTime()); + } + + if(azureutil.objectIsNull(options.timeoutIntervalInMs)) { + options.timeoutIntervalInMs = this.defaultTimeoutIntervalInMs; + } + + if(azureutil.objectIsNull(options.clientRequestTimeoutInMs)) { + options.clientRequestTimeoutInMs = this.defaultClientRequestTimeoutInMs; + } + + if(!azureutil.objectIsNull(options.timeoutIntervalInMs) && options.timeoutIntervalInMs > 0) { + webResource.withQueryOption(QueryStringConstants.TIMEOUT, Math.ceil(options.timeoutIntervalInMs / 1000)); + } + + if(options.accessConditions) { + webResource.withHeader(HeaderConstants.IF_MATCH, options.accessConditions.EtagMatch); + webResource.withHeader(HeaderConstants.IF_MODIFIED_SINCE, options.accessConditions.DateModifedSince); + webResource.withHeader(HeaderConstants.IF_NONE_MATCH, options.accessConditions.EtagNonMatch); + webResource.withHeader(HeaderConstants.IF_UNMODIFIED_SINCE, options.accessConditions.DateUnModifiedSince); + webResource.withHeader(HeaderConstants.SEQUENCE_NUMBER_EQUAL, options.accessConditions.SequenceNumberEqual); + webResource.withHeader(HeaderConstants.SEQUENCE_NUMBER_LESS_THAN, options.accessConditions.SequenceNumberLessThan); + webResource.withHeader(HeaderConstants.SEQUENCE_NUMBER_LESS_THAN_OR_EQUAL, options.accessConditions.SequenceNumberLessThanOrEqual); + webResource.withHeader(HeaderConstants.BLOB_CONDITION_MAX_SIZE, options.accessConditions.MaxBlobSize); + webResource.withHeader(HeaderConstants.BLOB_CONDITION_APPEND_POSITION, options.accessConditions.MaxAppendPosition); + } + + if(options.sourceAccessConditions) { + webResource.withHeader(HeaderConstants.SOURCE_IF_MATCH, options.sourceAccessConditions.EtagMatch); + webResource.withHeader(HeaderConstants.SOURCE_IF_MODIFIED_SINCE, 
options.sourceAccessConditions.DateModifedSince); + webResource.withHeader(HeaderConstants.SOURCE_IF_NONE_MATCH, options.sourceAccessConditions.EtagNonMatch); + webResource.withHeader(HeaderConstants.SOURCE_IF_UNMODIFIED_SINCE, options.sourceAccessConditions.DateUnModifiedSince); + } + + if (!webResource.headers || webResource.headers[HeaderConstants.CONTENT_TYPE] === undefined) { + // work around to add an empty content type header to prevent the request module from magically adding a content type. + webResource.headers[HeaderConstants.CONTENT_TYPE] = ''; + } else if (webResource.headers && webResource.headers[HeaderConstants.CONTENT_TYPE] === null) { + delete webResource.headers[HeaderConstants.CONTENT_TYPE]; + } + + if (!webResource.headers || webResource.headers[HeaderConstants.CONTENT_LENGTH] === undefined) { + if (body && body.outputData) { + webResource.withHeader(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(body.outputData, 'UTF8')); + } else if (webResource.headers[HeaderConstants.CONTENT_LENGTH] === undefined) { + webResource.withHeader(HeaderConstants.CONTENT_LENGTH, 0); + } + } else if (webResource.headers && webResource.headers[HeaderConstants.CONTENT_LENGTH] === null) { + delete webResource.headers[HeaderConstants.CONTENT_LENGTH]; + } + + var enableGlobalHttpAgent = this.enableGlobalHttpAgent; + + // Sets the request url in the web resource. + this._setRequestUrl(webResource, options); + + this.emit('sendingRequestEvent', webResource); + + // Now that the web request is finalized, sign it + this.storageCredentials.signRequest(webResource, function (error) { + var requestOptions = null; + + if (!error) { + var targetUrl = webResource.uri; + + requestOptions = { + uri: url.format(targetUrl), + method: webResource.method, + headers: webResource.headers, + mode: 'disable-fetch' + }; + + if (options) { + //set encoding of response data. 
If set to null, the body is returned as a Buffer + requestOptions.encoding = options.responseEncoding; + } + + if (options && options.clientRequestTimeoutInMs) { + requestOptions.timeout = options.clientRequestTimeoutInMs; + } else { + requestOptions.timeout = Constants.DEFAULT_CLIENT_REQUEST_TIMEOUT_IN_MS; // 2 minutes + } + + // If global HTTP agent is not enabled, use forever agent. + if (enableGlobalHttpAgent !== true) { + requestOptions.forever = true; + } + } + + callback(error, requestOptions); + }); +}; + +/** +* Process the response. +* @ignore +* +* @param {WebResource} webResource The web resource that made the request. +* @param {Response} response The response object. +* @param {Options} options The response parsing options. +* @param {String} options.payloadFormat The payload format. +* @return The normalized responseObject. +*/ +StorageServiceClient.prototype._processResponse = function (webResource, response, options) { + var self = this; + + function convertRawHeadersToHeaders(rawHeaders) { + var headers = {}; + if(!rawHeaders) { + return undefined; + } + + for(var i = 0; i < rawHeaders.length; i++) { + var headerName; + if (rawHeaders[i].indexOf(HeaderConstants.PREFIX_FOR_STORAGE_METADATA) === 0) { + headerName = rawHeaders[i]; + } else { + headerName = rawHeaders[i].toLowerCase(); + } + headers[headerName] = rawHeaders[++i]; + } + + return headers; + } + + var validResponse = WebResource.validResponse(response.statusCode); + var rsp = StorageServiceClient._buildResponse(validResponse, response.body, convertRawHeadersToHeaders(response.rawHeaders) || response.headers, response.statusCode, response.md5); + var responseObject; + + if (validResponse && webResource.rawResponse) { + responseObject = { error: null, response: rsp }; + } else { + // attempt to parse the response body, errors will be returned in rsp.error without modifying the body + rsp = StorageServiceClient._parseResponse(rsp, self.xml2jsSettings, options); + + if (validResponse && 
!rsp.error) { + responseObject = { error: null, response: rsp }; + } else { + rsp.isSuccessful = false; + + if (response.statusCode < 400 || response.statusCode >= 500) { + this.logger.log(Logger.LogLevels.DEBUG, + 'ERROR code = ' + response.statusCode + ' :\n' + util.inspect(rsp.body)); + } + + // responseObject.error should contain normalized parser errors if they occured in _parseResponse + // responseObject.response.body should contain the raw response body in that case + var errorBody = rsp.body; + if(rsp.error) { + errorBody = rsp.error; + delete rsp.error; + } + + if (!errorBody) { + var code = Object.keys(HttpResponseCodes).filter(function (name) { + if (HttpResponseCodes[name] === rsp.statusCode) { + return name; + } + }); + + errorBody = { error: { code: code[0] } }; + } + + var normalizedError = StorageServiceClient._normalizeError(errorBody, response); + responseObject = { error: normalizedError, response: rsp }; + } + } + + this.logger.log(Logger.LogLevels.DEBUG, 'RESPONSE:\n' + util.inspect(responseObject)); + + return responseObject; +}; + +/** +* Associate a filtering operation with this StorageServiceClient. Filtering operations +* can include logging, automatically retrying, etc. Filter operations are objects +* that implement a method with the signature: +* +* "function handle (requestOptions, next)". +* +* After doing its preprocessing on the request options, the method needs to call +* "next" passing a callback with the following signature: +* signature: +* +* "function (returnObject, finalCallback, next)" +* +* In this callback, and after processing the returnObject (the response from the +* request to the server), the callback needs to either invoke next if it exists to +* continue processing other filters or simply invoke finalCallback otherwise to end +* up the service invocation. +* +* @param {Object} filter The new filter object. +* @return {StorageServiceClient} A new service client with the filter applied. 
+*/ +StorageServiceClient.prototype.withFilter = function (newFilter) { + // Create a new object with the same members as the current service + var derived = _.clone(this); + + // If the current service has a filter, merge it with the new filter + // (allowing us to effectively pipeline a series of filters) + var parentFilter = this.filter; + var mergedFilter = newFilter; + if (parentFilter !== undefined) { + // The parentFilterNext is either the operation or the nextPipe function generated on a previous merge + // Ordering is [f3 pre] -> [f2 pre] -> [f1 pre] -> operation -> [f1 post] -> [f2 post] -> [f3 post] + mergedFilter = function (originalRequestOptions, parentFilterNext) { + newFilter.handle(originalRequestOptions, function (postRequestOptions, newFilterCallback) { + // handle parent filter pre and get Parent filter post + var next = function (postPostRequestOptions, parentFilterCallback) { + // The parentFilterNext is the filter next to the merged filter. + // For 2 filters, that'd be the actual operation. + parentFilterNext(postPostRequestOptions, function (responseObject, responseCallback, finalCallback) { + parentFilterCallback(responseObject, finalCallback, function (postResponseObject) { + newFilterCallback(postResponseObject, responseCallback, finalCallback); + }); + }); + }; + + parentFilter(postRequestOptions, next); + }); + }; + } + + // Store the filter so it can be applied in performRequest + derived.filter = mergedFilter; + return derived; + }; + +/* +* Builds a response object with normalized key names. +* @ignore +* +* @param {Bool} isSuccessful Boolean value indicating if the request was successful +* @param {Object} body The response body. +* @param {Object} headers The response headers. +* @param {int} statusCode The response status code. +* @param {string} md5 The response's content md5 hash. +* @return {Object} A response object. 
+*/ +StorageServiceClient._buildResponse = function (isSuccessful, body, headers, statusCode, md5) { + var response = { + isSuccessful: isSuccessful, + statusCode: statusCode, + body: body, + headers: headers, + md5: md5 + }; + + if (!azureutil.objectIsNull(headers)) { + if (headers[HeaderConstants.REQUEST_SERVER_ENCRYPTED] !== undefined) { + response.requestServerEncrypted = (headers[HeaderConstants.REQUEST_SERVER_ENCRYPTED] === 'true'); + } + } + + return response; +}; + +/** +* Parses a server response body from XML or JSON into a JS object. +* This is done using the xml2js library. +* @ignore +* +* @param {object} response The response object with a property "body" with a XML or JSON string content. +* @param {object} xml2jsSettings The XML to json settings. +* @param {Options} options The response parsing options. +* @param {String} options.payloadFormat The payload format. +* @return {object} The same response object with the body part as a JS object instead of a XML or JSON string. 
+*/ +StorageServiceClient._parseResponse = function (response, xml2jsSettings, options) { + function parseXml(body) { + var parsed; + var parser = new xml2js.Parser(xml2jsSettings); + parser.parseString(azureutil.removeBOM(body.toString()), function (err, parsedBody) { + if (err) { + var xmlError = new SyntaxError('EXMLFORMAT'); + xmlError.innerError = err; + throw xmlError; + } else { parsed = parsedBody; } + }); + + return parsed; + } + + if (response.body && Buffer.byteLength(response.body.toString()) > 0) { + var contentType = ''; + if (response.headers && response.headers['content-type']) { + contentType = response.headers['content-type'].toLowerCase(); + } + + try { + if (contentType.indexOf('application/json') !== -1) { + if (options && options.payloadFormat && options.payloadFormat !== TableUtilities.PayloadFormat.NO_METADATA) { + var parser = new Parser(); + parser.onValue = function (value) { + response.body = value; + }; + parser.write(response.body); + } else { + response.body = JSON.parse(response.body); + } + } else if (contentType.indexOf('application/xml') !== -1 || contentType.indexOf('application/atom+xml') !== -1) { + response.body = parseXml(response.body); + } else if (contentType.indexOf('text/html') !== -1) { + response.body = response.body; + } else { + response.body = parseXml(response.body); + // throw new SyntaxError(SR.CONTENT_TYPE_MISSING, null); + } + } catch (e) { + response.error = e; + } + } + + return response; +}; + +/** +* Gets the storage settings. +* +* @param {string} [storageAccountOrConnectionString] The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {string} [host] The host address. +* @param {object} [sas] The Shared Access Signature string. +* @param {TokenCredential} [token] The {@link TokenCredential} object. 
+* +* @return {StorageServiceSettings} +*/ +StorageServiceClient.getStorageSettings = function (storageAccountOrConnectionString, storageAccessKey, host, sas, endpointSuffix, token) { + var storageServiceSettings; + if (storageAccountOrConnectionString && !storageAccessKey && !sas) { + // If storageAccountOrConnectionString was passed and no accessKey was passed, assume connection string + storageServiceSettings = StorageServiceSettings.createFromConnectionString(storageAccountOrConnectionString); + } else if ((storageAccountOrConnectionString && storageAccessKey) || sas || token || host) { + // Account and key or credentials or anonymous + storageServiceSettings = StorageServiceSettings.createExplicitly(storageAccountOrConnectionString, storageAccessKey, host, sas, endpointSuffix, token); + } else { + // Use environment variables + storageServiceSettings = StorageServiceSettings.createFromEnvironment(); + } + + return storageServiceSettings; +}; + +/** +* Sets the webResource's requestUrl based on the service client settings. +* @ignore +* +* @param {WebResource} webResource The web resource where to set the request url. 
+*/ +StorageServiceClient.prototype._setRequestUrl = function (webResource, options) { + // Normalize the path + // Backup the original path of the webResource to make sure it works fine even this function get executed multiple times - like RetryFilter + webResource.originalPath = webResource.originalPath || webResource.path; + webResource.path = this._getPath(webResource.originalPath); + + if(!this.host){ + throw new ArgumentNullError('this.host', SR.STORAGE_HOST_LOCATION_REQUIRED); + } + + var host = this.host.primaryHost; + + if(!azureutil.objectIsNull(options) && options.currentLocation === Constants.StorageLocation.SECONDARY) { + host = this.host.secondaryHost; + } + + if(host && host.lastIndexOf('/') !== (host.length - 1)){ + host = host + '/'; + } + + var fullPath = url.format({pathname: webResource.path, query: webResource.queryString}); + webResource.uri = url.resolve(host, fullPath); + webResource.path = url.parse(webResource.uri).pathname; +}; + +/** +* Retrieves the normalized path to be used in a request. +* It also removes any leading "/" of the path in case +* it's there before. +* @ignore +* @param {string} path The path to be normalized. +* @return {string} The normalized path. 
+*/ +StorageServiceClient.prototype._getPath = function (path) { + if (path === null || path === undefined) { + path = ''; + } else if (path.indexOf('/') === 0) { + path = path.substring(1); + } + + return path; +}; + +/** + * Get the url of a given path + */ +StorageServiceClient.prototype._getUrl = function (path, sasToken, primary) { + var host; + if (!azureutil.objectIsNull(primary) && primary === false) { + host = this.host.secondaryHost; + } else { + host = this.host.primaryHost; + } + + host = azureutil.trimPortFromUri(host); + if(host && host.lastIndexOf('/') !== (host.length - 1)){ + host = host + '/'; + } + + var query = qs.parse(sasToken); + var fullPath = url.format({ pathname: this._getPath(path), query: query }); + return url.resolve(host, fullPath); +}; + +/** +* Initializes the default filter. +* This filter is responsible for chaining the pre filters request into the operation and, after processing the response, +* pass it to the post processing filters. This method should only be invoked by the StorageServiceClient constructor. +* @ignore +* +*/ +StorageServiceClient.prototype._initDefaultFilter = function () { + this.filter = function (requestOptions, nextPreCallback) { + if (nextPreCallback) { + // Handle the next pre callback and pass the function to be handled as post call back. + nextPreCallback(requestOptions, function (returnObject, finalCallback, nextPostCallback) { + if (nextPostCallback) { + nextPostCallback(returnObject); + } else if (finalCallback) { + finalCallback(returnObject); + } + }); + } + }; +}; + +/** +* Retrieves the metadata headers from the response headers. +* @ignore +* +* @param {object} headers The metadata headers. +* @return {object} An object with the metadata headers (without the "x-ms-" prefix). 
+*/ +StorageServiceClient.prototype.parseMetadataHeaders = function (headers) { + var metadata = {}; + + if (!headers) { + return metadata; + } + + for (var header in headers) { + if (header.indexOf(HeaderConstants.PREFIX_FOR_STORAGE_METADATA) === 0) { + var key = header.substr(HeaderConstants.PREFIX_FOR_STORAGE_METADATA.length, header.length - HeaderConstants.PREFIX_FOR_STORAGE_METADATA.length); + metadata[key] = headers[header]; + } + } + + return metadata; +}; + +/** +* Gets the properties of a storage account’s service, including Azure Storage Analytics. +* @ignore +* +* @this {StorageServiceClient} +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `result` will contain the properties +* and `response` will contain information related to this operation. 
+*/ +StorageServiceClient.prototype.getAccountServiceProperties = function (optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getServiceProperties', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var webResource = WebResource.get() + .withQueryOption(QueryStringConstants.COMP, 'properties') + .withQueryOption(QueryStringConstants.RESTYPE, 'service'); + + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + + var processResponseCallback = function (responseObject, next) { + responseObject.servicePropertiesResult = null; + if (!responseObject.error) { + responseObject.servicePropertiesResult = ServicePropertiesResult.parse(responseObject.response.body.StorageServiceProperties); + } + + // function to be called after all filters + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.servicePropertiesResult, returnObject.response); + }; + + // call the first filter + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Sets the properties of a storage account’s service, including Azure Storage Analytics. +* You can also use this operation to set the default request version for all incoming requests that do not have a version specified. +* +* @this {StorageServiceClient} +* @param {object} serviceProperties The service properties. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise, `response` +* will contain information related to this operation. +*/ +StorageServiceClient.prototype.setAccountServiceProperties = function (serviceProperties, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setServiceProperties', function (v) { + v.object(serviceProperties, 'serviceProperties'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var servicePropertiesXml = ServicePropertiesResult.serialize(serviceProperties); + + var webResource = WebResource.put() + .withQueryOption(QueryStringConstants.COMP, 'properties') + .withQueryOption(QueryStringConstants.RESTYPE, 'service') + .withHeader(HeaderConstants.CONTENT_TYPE, 'application/xml;charset="utf-8"') + .withHeader(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(servicePropertiesXml)) + .withBody(servicePropertiesXml); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, 
webResource.body, options, processResponseCallback); +}; + +// Other functions + +/** +* Processes the error body into a normalized error object with all the properties lowercased. +* +* Error information may be returned by a service call with additional debugging information: +* http://msdn.microsoft.com/en-us/library/windowsazure/dd179382.aspx +* +* Table services returns these properties lowercased, example, "code" instead of "Code". So that the user +* can always expect the same format, this method lower cases everything. +* +* @ignore +* +* @param {Object} error The error object as returned by the service and parsed to JSON by the xml2json. +* @return {Object} The normalized error object with all properties lower cased. +*/ +StorageServiceClient._normalizeError = function (error, response) { + if (azureutil.objectIsString(error)) { + return new StorageError(error, null); + } else if (error) { + var normalizedError = {}; + + // blob/queue errors should have error.Error, table errors should have error['odata.error'] + var errorProperties = error.Error || error.error || error['odata.error'] || error['m:error'] || error; + normalizedError.code = errorProperties.message; // The message exists when there is error.Error. + + for (var property in errorProperties) { + if (errorProperties.hasOwnProperty(property)) { + var key = property.toLowerCase(); + if(key.indexOf('m:') === 0) { + key = key.substring(2); + } + + normalizedError[key] = errorProperties[property]; + + // if this is a table error, message is an object - flatten it to normalize with blob/queue errors + // ex: "message":{"lang":"en-US","value":"The specified resource does not exist."} becomes message: "The specified resource does not exist." 
+ if (key === 'message' && _.isObject(errorProperties[property])) { + if (errorProperties[property]['value']) { + normalizedError[key] = errorProperties[property]['value']; + } + } + } + } + + // add status code and server request id if available + if (response) { + if (response.statusCode) { + normalizedError.statusCode = response.statusCode; + } + + if (response.headers && response.headers['x-ms-request-id']) { + normalizedError.requestId = response.headers['x-ms-request-id']; + } + } + + var errorObject = new StorageError(normalizedError.code, normalizedError); + return errorObject; + } + + return null; +}; + +/** +* Sets proxy object specified by caller. +* +* @param {object} proxy proxy to use for tunneling +* { +* host: hostname +* port: port number +* proxyAuth: 'user:password' for basic auth +* headers: {...} headers for proxy server +* key: key for proxy server +* ca: ca for proxy server +* cert: cert for proxy server +* } +* if null or undefined, clears proxy +*/ +StorageServiceClient.prototype.setProxy = function (proxy) { + if (proxy) { + this.proxy = proxy; + } else { + this.proxy = null; + } +}; + +/** +* Sets the service host default proxy from the environment. +* Can be overridden by calling _setProxyUrl or _setProxy +* +*/ +StorageServiceClient.prototype._setDefaultProxy = function () { + var proxyUrl = StorageServiceClient._loadEnvironmentProxyValue(); + if (proxyUrl) { + var parsedUrl = url.parse(proxyUrl); + if (!parsedUrl.port) { + parsedUrl.port = 80; + } + this.setProxy(parsedUrl); + } else { + this.setProxy(null); + } +}; + +/* +* Loads the fields "useProxy" and respective protocol, port and url +* from the environment values HTTPS_PROXY and HTTP_PROXY +* in case those are set. 
+* @ignore +* +* @return {string} or null +*/ +StorageServiceClient._loadEnvironmentProxyValue = function () { + var proxyUrl = null; + if (process.env[StorageServiceClientConstants.EnvironmentVariables.HTTPS_PROXY]) { + proxyUrl = process.env[StorageServiceClientConstants.EnvironmentVariables.HTTPS_PROXY]; + } else if (process.env[StorageServiceClientConstants.EnvironmentVariables.HTTPS_PROXY.toLowerCase()]) { + proxyUrl = process.env[StorageServiceClientConstants.EnvironmentVariables.HTTPS_PROXY.toLowerCase()]; + } else if (process.env[StorageServiceClientConstants.EnvironmentVariables.HTTP_PROXY]) { + proxyUrl = process.env[StorageServiceClientConstants.EnvironmentVariables.HTTP_PROXY]; + } else if (process.env[StorageServiceClientConstants.EnvironmentVariables.HTTP_PROXY.toLowerCase()]) { + proxyUrl = process.env[StorageServiceClientConstants.EnvironmentVariables.HTTP_PROXY.toLowerCase()]; + } + + return proxyUrl; +}; + +/** +* Initializes the location to which the operation is being sent to. +*/ +StorageServiceClient.prototype._initializeLocation = function (options) { + if(!azureutil.objectIsNull(options.locationMode)) { + switch(options.locationMode) { + case StorageUtilities.LocationMode.PRIMARY_ONLY: + case StorageUtilities.LocationMode.PRIMARY_THEN_SECONDARY: + options.currentLocation = Constants.StorageLocation.PRIMARY; + break; + case StorageUtilities.LocationMode.SECONDARY_ONLY: + case StorageUtilities.LocationMode.SECONDARY_THEN_PRIMARY: + options.currentLocation = Constants.StorageLocation.SECONDARY; + break; + default: + throw new RangeError(util.format(SR.ARGUMENT_OUT_OF_RANGE_ERROR, 'locationMode', options.locationMode)); + } + } else { + options.locationMode = StorageUtilities.LocationMode.PRIMARY_ONLY; + options.currentLocation = Constants.StorageLocation.PRIMARY; + } +}; + +/** +* Validates the location to which the operation is being sent to. 
+*/ +StorageServiceClient.prototype._validateLocation = function (options) { + if(this._invalidLocationMode(options.locationMode)) { + throw new ArgumentNullError('host', SR.STORAGE_HOST_MISSING_LOCATION); + } + + switch(options.requestLocationMode) { + case Constants.RequestLocationMode.PRIMARY_ONLY: + if(options.locationMode === StorageUtilities.LocationMode.SECONDARY_ONLY) { + throw new ArgumentError('host.primaryHost', SR.PRIMARY_ONLY_COMMAND); + } + + options.currentLocation = Constants.StorageLocation.PRIMARY; + options.locationMode = StorageUtilities.LocationMode.PRIMARY_ONLY; + break; + + case Constants.RequestLocationMode.SECONDARY_ONLY: + if(options.locationMode === StorageUtilities.LocationMode.PRIMARY_ONLY) { + throw new ArgumentError('host.secondaryHost', SR.SECONDARY_ONLY_COMMAND); + } + + options.currentLocation = Constants.StorageLocation.SECONDARY; + options.locationMode = StorageUtilities.LocationMode.SECONDARY_ONLY; + break; + + default: + // no op + } +}; + +/** +* Checks whether we have the relevant host information based on the locationMode. +*/ +StorageServiceClient.prototype._invalidLocationMode = function (locationMode) { + switch(locationMode) { + case StorageUtilities.LocationMode.PRIMARY_ONLY: + return azureutil.objectIsNull(this.host.primaryHost); + case StorageUtilities.LocationMode.SECONDARY_ONLY: + return azureutil.objectIsNull(this.host.secondaryHost); + default: + return (azureutil.objectIsNull(this.host.primaryHost) || azureutil.objectIsNull(this.host.secondaryHost)); + } +}; + +/** +* Checks to see if the maximum execution timeout provided has been exceeded. +*/ +StorageServiceClient.prototype._maximumExecutionTimeExceeded = function (currentTime, expiryTime) { + if(!azureutil.objectIsNull(expiryTime) && currentTime > expiryTime) { + return true; + } else { + return false; + } +}; + +/** +* Sets the operation expiry time. 
+*/ +StorageServiceClient.prototype._setOperationExpiryTime = function (options) { + if(azureutil.objectIsNull(options.operationExpiryTime)) { + if(!azureutil.objectIsNull(options.maximumExecutionTimeInMs)) { + options.operationExpiryTime = Date.now() + options.maximumExecutionTimeInMs; + } else if(this.defaultMaximumExecutionTimeInMs) { + options.operationExpiryTime = Date.now() + this.defaultMaximumExecutionTimeInMs; + } + } +}; + +module.exports = StorageServiceClient; + + +/***/ }), + +/***/ 40308: +/***/ ((module, exports, __nccwpck_require__) => { + +"use strict"; +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + + + +var _ = __nccwpck_require__(7404); +var url = __nccwpck_require__(57310); + +var util = __nccwpck_require__(3396); +var ServiceSettings = __nccwpck_require__(18876); +var Constants = __nccwpck_require__(60658); +var StorageServiceClientConstants = Constants.StorageServiceClientConstants; +var ConnectionStringKeys = Constants.ConnectionStringKeys; +var Validate = __nccwpck_require__(51046); +var SR = __nccwpck_require__(13497); +var TokenCredential = __nccwpck_require__(39649); + +var useDevelopmentStorageSetting = ServiceSettings.setting(ConnectionStringKeys.USE_DEVELOPMENT_STORAGE_NAME, true); +var developmentStorageProxyUriSetting = ServiceSettings.settingWithFunc(ConnectionStringKeys.DEVELOPMENT_STORAGE_PROXY_URI_NAME, Validate.isValidUri); +var defaultEndpointsProtocolSetting = ServiceSettings.setting(ConnectionStringKeys.DEFAULT_ENDPOINTS_PROTOCOL_NAME, 'http', 'https'); +var accountNameSetting = ServiceSettings.setting(ConnectionStringKeys.ACCOUNT_NAME_NAME); +var accountKeySetting = ServiceSettings.settingWithFunc(ConnectionStringKeys.ACCOUNT_KEY_NAME, Validate.isBase64Encoded); +var sasSetting = ServiceSettings.settingWithFunc(ConnectionStringKeys.SHARED_ACCESS_SIGNATURE_NAME, _.isString); +var tokenSetting = ServiceSettings.settingWithFunc('token', function (object) {return object instanceof TokenCredential;}); + +var blobEndpointSetting = ServiceSettings.settingWithFunc( + ConnectionStringKeys.BLOB_ENDPOINT_NAME, + Validate.isValidHost +); + +var queueEndpointSetting = ServiceSettings.settingWithFunc( + ConnectionStringKeys.QUEUE_ENDPOINT_NAME, + Validate.isValidHost +); + +var tableEndpointSetting = ServiceSettings.settingWithFunc( + ConnectionStringKeys.TABLE_ENDPOINT_NAME, + Validate.isValidHost +); + +var fileEndpointSetting = ServiceSettings.settingWithFunc( + ConnectionStringKeys.FILE_ENDPOINT_NAME, + Validate.isValidHost +); + +var endpointSuffixSetting = ServiceSettings.settingWithFunc( + ConnectionStringKeys.ENDPOINT_SUFFIX_NAME, + 
Validate.isValidHost +); + +var validKeys = [ + ConnectionStringKeys.USE_DEVELOPMENT_STORAGE_NAME, + ConnectionStringKeys.DEVELOPMENT_STORAGE_PROXY_URI_NAME, + ConnectionStringKeys.DEFAULT_ENDPOINTS_PROTOCOL_NAME, + ConnectionStringKeys.ACCOUNT_NAME_NAME, + ConnectionStringKeys.ACCOUNT_KEY_NAME, + ConnectionStringKeys.SHARED_ACCESS_SIGNATURE_NAME, + ConnectionStringKeys.BLOB_ENDPOINT_NAME, + ConnectionStringKeys.QUEUE_ENDPOINT_NAME, + ConnectionStringKeys.TABLE_ENDPOINT_NAME, + ConnectionStringKeys.FILE_ENDPOINT_NAME, + ConnectionStringKeys.ENDPOINT_SUFFIX_NAME +]; + +/** +* Creates new storage service settings instance. +* +* @param {string} name The storage service name. +* @param {string} key The storage service key. +* @param {string} sasToken The storage service shared access signature token. +* @param {string} blobEndpoint The storage service blob endpoint. +* @param {string} queueEndpoint The storage service queue endpoint. +* @param {string} tableEndpoint The storage service table endpoint. +* @param {string} fileEndpoint The storage service file endpoint. +* @param {bool} usePathStyleUri Boolean value indicating wether to use path style uri or not. +* @param {TokenCredential} [token] The {@link TokenCredential} object. +*/ +function StorageServiceSettings(name, key, sasToken, blobEndpoint, queueEndpoint, tableEndpoint, fileEndpoint, usePathStyleUri, token) { + this._name = name; + this._key = key; + + if (sasToken && sasToken[0] === '?') { + this._sasToken = sasToken.slice(1); + } else { + this._sasToken = sasToken; + } + + this._blobEndpoint = blobEndpoint; + this._queueEndpoint = queueEndpoint; + this._tableEndpoint = tableEndpoint; + this._fileEndpoint = fileEndpoint; + + if (usePathStyleUri) { + this._usePathStyleUri = usePathStyleUri; + } else { + this._usePathStyleUri = false; + } + + this._token = token; +} + +/** +* Creates a StorageServiceSettings object from the given connection string. 
+* +* @param {string} connectionString The storage settings connection string. +* @return {StorageServiceSettings} +*/ +StorageServiceSettings.createFromConnectionString = function (connectionString) { + var tokenizedSettings = ServiceSettings.parseAndValidateKeys(connectionString, validKeys); + + try { + return StorageServiceSettings.createFromSettings(tokenizedSettings); + } catch (e) { + if (e instanceof ServiceSettings.NoMatchError) { + // Replace no match settings exception by no match connection string one. + ServiceSettings.noMatchConnectionString(connectionString); + } else { + throw e; + } + } +}; + +StorageServiceSettings.createExplicitly = function (storageAccount, storageAccessKey, host, sasToken, endpointSuffix, token) { + var settings = {}; + function addIfNotNullOrEmpty(key, value){ + if(typeof value === 'string' && !util.stringIsEmpty(value)){ + settings[key] = value; + } else if (typeof value == 'object' && !util.objectIsNull(value)) { + settings[key] = value; + } + } + + // Endpoints + if (host) { + addIfNotNullOrEmpty('blobendpoint', host); + addIfNotNullOrEmpty('tableendpoint', host); + addIfNotNullOrEmpty('queueendpoint', host); + addIfNotNullOrEmpty('fileendpoint', host); + } else { + addIfNotNullOrEmpty('defaultendpointsprotocol', ServiceSettings.DEFAULT_PROTOCOL.split(':', 1)[0]); + } + + addIfNotNullOrEmpty('accountname', storageAccount); + addIfNotNullOrEmpty('accountkey', storageAccessKey); + addIfNotNullOrEmpty('sharedaccesssignature', sasToken); + addIfNotNullOrEmpty('endpointsuffix', endpointSuffix); + addIfNotNullOrEmpty('token', token); + + return StorageServiceSettings.createFromSettings(settings); +}; + +StorageServiceSettings.createFromEnvironment = function () { + var emulated = process.env[StorageServiceClientConstants.EnvironmentVariables.EMULATED]; + if (emulated) { + return StorageServiceSettings.getDevelopmentStorageAccountSettings(); + } + + var connectionString = 
process.env[StorageServiceClientConstants.EnvironmentVariables.AZURE_STORAGE_CONNECTION_STRING]; + if (connectionString) { + return StorageServiceSettings.createFromConnectionString(connectionString); + } + + var storageAccount = process.env[StorageServiceClientConstants.EnvironmentVariables.AZURE_STORAGE_ACCOUNT]; + var storageAccessKey = process.env[StorageServiceClientConstants.EnvironmentVariables.AZURE_STORAGE_ACCESS_KEY]; + if(storageAccount && storageAccessKey){ + return StorageServiceSettings.createExplicitly(storageAccount, storageAccessKey, null, null, null); + } + + throw new Error(SR.NO_CREDENTIALS_PROVIDED); +}; + +/** +* Creates a StorageServiceSettings object from a set of settings. +* +* @param {object} settings The settings object. +* @return {StorageServiceSettings} +*/ +StorageServiceSettings.createFromSettings = function (settings) { + // Devstore case + var matchedSpecs = ServiceSettings.matchedSpecification( + settings, + ServiceSettings.allRequired(useDevelopmentStorageSetting), + ServiceSettings.optional(developmentStorageProxyUriSetting) + ); + + if (matchedSpecs) { + var proxyUri = util.tryGetValueInsensitive( + ConnectionStringKeys.DEVELOPMENT_STORAGE_PROXY_URI_NAME, + settings + ); + + return this.getDevelopmentStorageAccountSettings(proxyUri); + } + + // Account/Key automatic case + matchedSpecs = ServiceSettings.matchedSpecification( + settings, + ServiceSettings.allRequired( + defaultEndpointsProtocolSetting, + accountNameSetting, + accountKeySetting + ), + ServiceSettings.optional( + blobEndpointSetting, + queueEndpointSetting, + tableEndpointSetting, + fileEndpointSetting, + endpointSuffixSetting + ) + ); + + if (matchedSpecs) { + return this._createStorageServiceSettings(settings); + } + + // Account/Key explicit case + matchedSpecs = ServiceSettings.matchedSpecification( + settings, + ServiceSettings.allRequired( + accountNameSetting, + accountKeySetting + ), + ServiceSettings.atLeastOne( + blobEndpointSetting, + 
queueEndpointSetting, + tableEndpointSetting, + fileEndpointSetting, + endpointSuffixSetting + ) + ); + + if (matchedSpecs) { + return this._createStorageServiceSettings(settings); + } + + // SAS case + matchedSpecs = ServiceSettings.matchedSpecification( + settings, + ServiceSettings.allRequired( + sasSetting + ), + ServiceSettings.atLeastOne( + blobEndpointSetting, + queueEndpointSetting, + tableEndpointSetting, + fileEndpointSetting, + endpointSuffixSetting + ) + ); + + if(matchedSpecs) { + return this._createStorageServiceSettings(settings); + } + + // anonymous explicit case + // Only blob anonymous access is valid. + matchedSpecs = ServiceSettings.matchedSpecification( + settings, + ServiceSettings.allRequired( + blobEndpointSetting + ), + ServiceSettings.optional( + fileEndpointSetting, + queueEndpointSetting, + tableEndpointSetting, + endpointSuffixSetting + ) + ); + + if(matchedSpecs) { + return this._createStorageServiceSettings(settings); + } + + // Token case + matchedSpecs = ServiceSettings.matchedSpecification( + settings, + ServiceSettings.allRequired( + tokenSetting + ), + ServiceSettings.atLeastOne( + blobEndpointSetting, + queueEndpointSetting, + tableEndpointSetting, + fileEndpointSetting + ) + ); + + if(matchedSpecs) { + return this._createStorageServiceSettings(settings); + } + + ServiceSettings.noMatchSettings(settings); +}; + +/** +* Returns a StorageServiceSettings with development storage credentials using +* the specified proxy Uri. +* +* @param {string} proxyUri The proxy endpoint to use. 
+* @return {StorageServiceSettings} +*/ +StorageServiceSettings.getDevelopmentStorageAccountSettings = function (proxyUri) { + if (!proxyUri) { + proxyUri = StorageServiceClientConstants.DEV_STORE_URI; + } + + var parsedUri = url.parse(proxyUri); + var scheme = parsedUri.protocol; + var host = parsedUri.host; + var prefix = scheme + '//' + host; + + var blobEndpoint = { + primaryHost: prefix + ':10000' + '/' + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCOUNT, + secondaryHost: prefix + ':10000' + '/' + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCOUNT + '-secondary' + }; + + var queueEndpoint = { + primaryHost: prefix + ':10001' + '/' + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCOUNT, + secondaryHost: prefix + ':10001' + '/' + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCOUNT + '-secondary' + }; + + var tableEndpoint = { + primaryHost: prefix + ':10002' + '/' + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCOUNT, + secondaryHost: prefix + ':10002' + '/' + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCOUNT + '-secondary' + }; + + return new StorageServiceSettings( + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCOUNT, + StorageServiceClientConstants.DEVSTORE_STORAGE_ACCESS_KEY, + null, + blobEndpoint, + queueEndpoint, + tableEndpoint, + null, + true + ); +}; + +/** +* Creates StorageServiceSettings object given endpoints uri. +* +* @ignore +* @param {array} settings The service settings. +* @param {string} blobEndpointUri The blob endpoint uri. +* @param {string} queueEndpointUri The queue endpoint uri. +* @param {string} tableEndpointUri The table endpoint uri. +* @param {string} fileEndpointUri The file endpoint uri. +* @return {StorageServiceSettings} +*/ +StorageServiceSettings._createStorageServiceSettings = function (settings) { + var standardizeHost = function (host, accountName, scheme, dns){ + var storageHost; + if (host) { + storageHost = {}; + storageHost.primaryHost = _.isString(host) ? 
host : host.primaryHost; + storageHost.secondaryHost = _.isString(host) ? undefined : host.secondaryHost; + } + + if (scheme && accountName && dns) { + storageHost = storageHost ? storageHost : {}; + storageHost.primaryHost = storageHost.primaryHost ? storageHost.primaryHost : url.format({ protocol: scheme, hostname: accountName + '.' + dns}); + storageHost.secondaryHost = storageHost.secondaryHost ? storageHost.secondaryHost : url.format({ protocol: scheme, hostname: accountName + '-secondary.' + dns}); + } + + return storageHost; + }; + + var scheme = util.tryGetValueInsensitive( + ConnectionStringKeys.DEFAULT_ENDPOINTS_PROTOCOL_NAME, + settings + ); + + var accountName = util.tryGetValueInsensitive( + ConnectionStringKeys.ACCOUNT_NAME_NAME, + settings + ); + + var accountKey = util.tryGetValueInsensitive( + ConnectionStringKeys.ACCOUNT_KEY_NAME, + settings + ); + + var sasToken = util.tryGetValueInsensitive( + ConnectionStringKeys.SHARED_ACCESS_SIGNATURE_NAME, + settings + ); + + var endpointSuffix = util.tryGetValueInsensitive( + ConnectionStringKeys.ENDPOINT_SUFFIX_NAME, + settings + ); + + var token = util.tryGetValueInsensitive( + 'token', + settings + ); + + var blobEndpoint = standardizeHost( + util.tryGetValueInsensitive(ConnectionStringKeys.BLOB_ENDPOINT_NAME, settings), + accountName, + scheme, + endpointSuffix ? 'blob.' + endpointSuffix : StorageServiceClientConstants.CLOUD_BLOB_HOST); + + var queueEndpoint = standardizeHost( + util.tryGetValueInsensitive(ConnectionStringKeys.QUEUE_ENDPOINT_NAME, settings), + accountName, + scheme, + endpointSuffix ? 'queue.' + endpointSuffix : StorageServiceClientConstants.CLOUD_QUEUE_HOST); + + var tableEndpoint = standardizeHost( + util.tryGetValueInsensitive(ConnectionStringKeys.TABLE_ENDPOINT_NAME, settings), + accountName, + scheme, + endpointSuffix ? 'table.' 
+ endpointSuffix : StorageServiceClientConstants.CLOUD_TABLE_HOST); + + var fileEndpoint = standardizeHost( + util.tryGetValueInsensitive(ConnectionStringKeys.FILE_ENDPOINT_NAME, settings), + accountName, + scheme, + endpointSuffix ? 'file.' + endpointSuffix : StorageServiceClientConstants.CLOUD_FILE_HOST); + + + return new StorageServiceSettings( + accountName, + accountKey, + sasToken, + blobEndpoint, + queueEndpoint, + tableEndpoint, + fileEndpoint, + token + ); +}; + +StorageServiceSettings.validKeys = validKeys; + +exports = module.exports = StorageServiceSettings; + +/***/ }), + +/***/ 99036: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var crypto = __nccwpck_require__(6113); + +/** +* Creates a new HmacSHA256Sign object. +* +* @constructor +*/ +function HmacSha256Sign(accessKey) { + this._accessKey = accessKey; + this._decodedAccessKey = Buffer.from(this._accessKey, 'base64'); +} + +/** +* Computes a signature for the specified string using the HMAC-SHA256 algorithm. +* +* @param {string} stringToSign The UTF-8-encoded string to sign. +* @return A String that contains the HMAC-SHA256-encoded signature. 
+*/ +HmacSha256Sign.prototype.sign = function (stringToSign) { + // Encoding the Signature + // Signature=Base64(HMAC-SHA256(UTF8(StringToSign))) + + return crypto.createHmac('sha256', this._decodedAccessKey).update(stringToSign, 'utf-8').digest('base64'); +}; + +module.exports = HmacSha256Sign; + +/***/ }), + +/***/ 68327: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +var Constants = __nccwpck_require__(60658); +var SR = __nccwpck_require__(13497); + +/** +* Creates a new SharedAccessSignature object. +* +* @constructor +* @param {string} sasToken The sasToken. +*/ +function SharedAccessSignature(sasToken) { + this.sasToken = sasToken; +} + +/** +* Signs a request with the signature header. +* +* @this {SharedAccessSignature} +* @param {WebResource} The webresource to be signed. +* @param {function(error)} callback The callback function. 
+*/ +SharedAccessSignature.prototype.signRequest = function (webResource, callback) { + if (webResource.uri.indexOf('?') === -1) { + webResource.uri += '?'; + } else { + webResource.uri += '&'; + } + + webResource.uri += this.sasToken; + + // Add the api-version + if (this.sasToken.indexOf('api-version') == -1) { + webResource.uri += '&' + Constants.QueryStringConstants.API_VERSION + '=' + Constants.HeaderConstants.TARGET_STORAGE_VERSION; + } else { + throw new SyntaxError(SR.INVALID_SAS_TOKEN); + } + callback(null); +}; + +module.exports = SharedAccessSignature; + + +/***/ }), + +/***/ 26130: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var _ = __nccwpck_require__(7404); +var qs = __nccwpck_require__(63477); + +var azureutil = __nccwpck_require__(3396); +var HmacSha256Sign = __nccwpck_require__(99036); +var SR = __nccwpck_require__(13497); +var errors = __nccwpck_require__(12528); +var ArgumentError = errors.ArgumentError; + +var Constants = __nccwpck_require__(60658); +var HeaderConstants = Constants.HeaderConstants; +var QueryStringConstants = Constants.QueryStringConstants; +var HeaderConstants = Constants.HeaderConstants; +var CompatibleVersionConstants = Constants.CompatibleVersionConstants; + +/** +* Creates a new SharedKey object. 
+* +* @constructor +* @param {string} storageAccount The storage account. +* @param {string} storageAccessKey The storage account's access key. +* @param {bool} usePathStyleUri Boolean value indicating if the path, or the hostname, should include the storage account. +*/ +function SharedKey(storageAccount, storageAccessKey, usePathStyleUri) { + this.storageAccount = storageAccount; + this.storageAccessKey = storageAccessKey; + this.usePathStyleUri = usePathStyleUri; + this.signer = new HmacSha256Sign(storageAccessKey); +} + +/** +* Generates the shared access signature for a account. +* For more detailed information, refer to https://msdn.microsoft.com/en-us/library/azure/mt584140.aspx +* +* @param {object} sharedAccessPolicy The shared access policy. +* @param {SharedAccessServices} sharedAccessPolicy.AccessPolicy.Services The services (blob, file, queue, table) for a shared access signature associated with this shared access policy. +* Refer to `Constants.AccountSasConstants.Services`. +* @param {SharedAccessResourceTypes} sharedAccessPolicy.AccessPolicy.ResourceTypes The resource type for a shared access signature associated with this shared access policy. +* Refer to `Constants.AccountSasConstants.Resources`. +* @param {SharedAccessPermissions} sharedAccessPolicy.AccessPolicy.Permissions The permissions for a shared access signature. +* Refer to `Constants.AccountSasConstants.Permissions`. +* @param {date} sharedAccessPolicy.AccessPolicy.Start The time at which the Shared Access Signature becomes valid. +* @param {date} sharedAccessPolicy.AccessPolicy.Expiry The time at which the Shared Access Signature becomes expired. +* @param {string} sharedAccessPolicy.AccessPolicy.IPAddressOrRange An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive. +* @param {string} sharedAccessPolicy.AccessPolicy.Protocols The protocols permitted for a request made with the account SAS. 
+* Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http. +* Refer to `Constants.AccountSasConstants.Protocols`. +* @return {string} The shared access signature. +*/ +SharedKey.prototype.generateAccountSignedQueryString = function (sharedAccessPolicy) { + var addIfNotNull = function (queryString, name, value) { + if (!azureutil.objectIsNull(name) && !azureutil.objectIsNull(value)) { + queryString[name] = value; + } + }; + + var formatAccessPolicyDates = function (accessPolicy) { + if (!azureutil.objectIsNull(accessPolicy.Start)) { + if (!_.isDate(accessPolicy.Start)) { + accessPolicy.Start = new Date(accessPolicy.Start); + } + + accessPolicy.Start = azureutil.truncatedISO8061Date(accessPolicy.Start); + } + + if (!azureutil.objectIsNull(accessPolicy.Expiry)) { + if (!_.isDate(accessPolicy.Expiry)) { + accessPolicy.Expiry = new Date(accessPolicy.Expiry); + } + + accessPolicy.Expiry = azureutil.truncatedISO8061Date(accessPolicy.Expiry); + } + }; + + var queryString = {}; + + addIfNotNull(queryString, QueryStringConstants.SIGNED_VERSION, HeaderConstants.TARGET_STORAGE_VERSION); + + // add shared access policy params + if (sharedAccessPolicy.AccessPolicy) { + formatAccessPolicyDates(sharedAccessPolicy.AccessPolicy); + + addIfNotNull(queryString, QueryStringConstants.SIGNED_SERVICES, sharedAccessPolicy.AccessPolicy.Services); + addIfNotNull(queryString, QueryStringConstants.SIGNED_RESOURCE_TYPES, sharedAccessPolicy.AccessPolicy.ResourceTypes); + addIfNotNull(queryString, QueryStringConstants.SIGNED_PERMISSIONS, sharedAccessPolicy.AccessPolicy.Permissions); + addIfNotNull(queryString, QueryStringConstants.SIGNED_START, sharedAccessPolicy.AccessPolicy.Start); + addIfNotNull(queryString, QueryStringConstants.SIGNED_EXPIRY, sharedAccessPolicy.AccessPolicy.Expiry); + addIfNotNull(queryString, QueryStringConstants.SIGNED_IP, sharedAccessPolicy.AccessPolicy.IPAddressOrRange); + addIfNotNull(queryString, 
QueryStringConstants.SIGNED_PROTOCOL, sharedAccessPolicy.AccessPolicy.Protocols); + } + + // add signature + addIfNotNull(queryString, QueryStringConstants.SIGNATURE, this._generateAccountSharedAccessSignature(sharedAccessPolicy)); + + return qs.stringify(queryString); +}; + + +/** +* Generates the signature part of the shared access signature for a account. +* For more detailed information, refer to https://msdn.microsoft.com/en-us/library/azure/mt584140.aspx +* +* @param {object} sharedAccessPolicy The shared access policy. +* @param {SharedAccessServices} sharedAccessPolicy.AccessPolicy.Services The services (blob, file, queue, table) for a shared access signature associated with this shared access policy. +* Refer to `Constants.AccountSasConstants.Services`. +* @param {SharedAccessResourceTypes} sharedAccessPolicy.AccessPolicy.ResourceTypes The resource type for a shared access signature associated with this shared access policy. +* Refer to `Constants.AccountSasConstants.ResourceTypes`. +* @param {SharedAccessPermissions} sharedAccessPolicy.AccessPolicy.Permissions The permissions for a shared access signature. +* Refer to `Constants.AccountSasConstants.Permissions`. +* @param {date} sharedAccessPolicy.AccessPolicy.Start The time at which the Shared Access Signature becomes valid. +* @param {date} sharedAccessPolicy.AccessPolicy.Expiry The time at which the Shared Access Signature becomes expired. +* @param {string} sharedAccessPolicy.AccessPolicy.IPAddressOrRange An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive. +* @param {string} sharedAccessPolicy.AccessPolicy.Protocols The protocols permitted for a request made with the account SAS. +* Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http. +* Refer to `Constants.AccountSasConstants.Protocols`. +* @return {string} The signature part of the shared access signature. 
+*/ +SharedKey.prototype._generateAccountSharedAccessSignature = function(sharedAccessPolicy){ + var getvalueToAppend = function (value, noNewLine) { + var returnValue = ''; + if (!azureutil.objectIsNull(value)) { + returnValue = value; + } + + if (noNewLine !== true) { + returnValue += '\n'; + } + + return returnValue; + }; + + var stringToSign = getvalueToAppend(this.storageAccount) + + getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Permissions : '') + + getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Services : '') + + getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.ResourceTypes : '') + + getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Start : '') + + getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Expiry : '') + + getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.IPAddressOrRange : '') + + getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Protocols : '') + + getvalueToAppend(HeaderConstants.TARGET_STORAGE_VERSION); + + return this.signer.sign(stringToSign); +}; + +/** +* Signs a request with the Authentication header. +* +* @param {WebResource} webResource The webresource to be signed. +* @param {function(error)} callback The callback function. 
+*/ +SharedKey.prototype.signRequest = function (webResource, callback) { + var getvalueToAppend = function (value, headerName) { + // Do not sign content-length 0 in 2014-08-16 and later + if (headerName === HeaderConstants.CONTENT_LENGTH && (azureutil.objectIsNull(value[headerName]) || value[headerName].toString() === '0')) { + return '\n'; + } else if (azureutil.objectIsNull(value) || azureutil.objectIsNull(value[headerName])) { + return '\n'; + } else { + return value[headerName] + '\n'; + } + }; + + var stringToSign = + webResource.method + '\n' + + getvalueToAppend(webResource.headers, HeaderConstants.CONTENT_ENCODING) + + getvalueToAppend(webResource.headers, HeaderConstants.CONTENT_LANGUAGE) + + getvalueToAppend(webResource.headers, HeaderConstants.CONTENT_LENGTH) + + getvalueToAppend(webResource.headers, HeaderConstants.CONTENT_MD5) + + getvalueToAppend(webResource.headers, HeaderConstants.CONTENT_TYPE) + + getvalueToAppend(webResource.headers, HeaderConstants.DATE) + + getvalueToAppend(webResource.headers, HeaderConstants.IF_MODIFIED_SINCE) + + getvalueToAppend(webResource.headers, HeaderConstants.IF_MATCH) + + getvalueToAppend(webResource.headers, HeaderConstants.IF_NONE_MATCH) + + getvalueToAppend(webResource.headers, HeaderConstants.IF_UNMODIFIED_SINCE) + + getvalueToAppend(webResource.headers, HeaderConstants.RANGE) + + this._getCanonicalizedHeaders(webResource) + + this._getCanonicalizedResource(webResource); + + var signature = this.signer.sign(stringToSign); + + webResource.withHeader(HeaderConstants.AUTHORIZATION, 'SharedKey ' + this.storageAccount + ':' + signature); + callback(null); +}; + +/* +* Retrieves the webresource's canonicalized resource string. +* @param {WebResource} webResource The webresource to get the canonicalized resource string from. +* @return {string} The canonicalized resource string. 
+*/ +SharedKey.prototype._getCanonicalizedResource = function (webResource) { + var path = '/'; + if (webResource.path) { + path = webResource.path; + } + + var canonicalizedResource = '/' + this.storageAccount + path; + + // Get the raw query string values for signing + var queryStringValues = webResource.queryString; + + // Build the canonicalized resource by sorting the values by name. + if (queryStringValues) { + var paramNames = []; + Object.keys(queryStringValues).forEach(function (n) { + paramNames.push(n); + }); + + paramNames = paramNames.sort(); + Object.keys(paramNames).forEach(function (name) { + canonicalizedResource += '\n' + paramNames[name] + ':' + queryStringValues[paramNames[name]]; + }); + } + + return canonicalizedResource; +}; + +/* +* Constructs the Canonicalized Headers string. +* +* To construct the CanonicalizedHeaders portion of the signature string, +* follow these steps: 1. Retrieve all headers for the resource that begin +* with x-ms-, including the x-ms-date header. 2. Convert each HTTP header +* name to lowercase. 3. Sort the headers lexicographically by header name, +* in ascending order. Each header may appear only once in the +* string. 4. Unfold the string by replacing any breaking white space with a +* single space. 5. Trim any white space around the colon in the header. 6. +* Finally, append a new line character to each canonicalized header in the +* resulting list. Construct the CanonicalizedHeaders string by +* concatenating all headers in this list into a single string. +* +* @param {object} The webresource object. +* @return {string} The canonicalized headers. 
+*/ +SharedKey.prototype._getCanonicalizedHeaders = function (webResource) { + // Build canonicalized headers + var canonicalizedHeaders = ''; + if (webResource.headers) { + var canonicalizedHeadersArray = []; + for (var header in webResource.headers) { + if (header.indexOf(HeaderConstants.PREFIX_FOR_STORAGE) === 0) { + var headerItem = { canonicalized: header.toLowerCase(), original: header }; + canonicalizedHeadersArray.push(headerItem); + } + } + + canonicalizedHeadersArray.sort(function(a, b) { return a.canonicalized.localeCompare(b.canonicalized); }); + + _.each(canonicalizedHeadersArray, function (currentHeaderItem) { + var value = webResource.headers[currentHeaderItem.original]; + if (!azureutil.IsNullOrEmptyOrUndefinedOrWhiteSpace(value)) { + canonicalizedHeaders += currentHeaderItem.canonicalized + ':' + value + '\n'; + } else { + canonicalizedHeaders += currentHeaderItem.canonicalized + ':\n'; + } + }); + } + + return canonicalizedHeaders; +}; + +/** +* Generates the query string for a shared access signature signing. +* +* @this {SharedAccessSignature} +* @param {string} serviceType The service type. +* @param {string} path The path to the resource. +* @param {object} sharedAccessPolicy The shared access policy. +* @param {string} [sharedAccessPolicy.Id] The signed identifier. +* @param {SharedAccessPermissions} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type. +* @param {date} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid. +* @param {date} [sharedAccessPolicy.AccessPolicy.Expiry] The time at which the Shared Access Signature becomes expired. +* @param {string} [sharedAccessPolicy.AccessPolicy.IPAddressOrRange] An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive. +* @param {string} [sharedAccessPolicy.AccessPolicy.Protocols] The protocols permitted for a request made with the account SAS. 
+* Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http. +* @param {string} sasVersion A string indicating the desired SAS Version to use, in storage service version format. Value must be 2012-02-12 or later. +* @parma {ResourceTypes} [args.resourceType] The resource type, if the resource is a blob or container. Null if the resource is a queue or table. +* @parma {ResourceTypes} [args.tableName] The table name, if the resource is a table. Null if the resource is a blob orqueue. +* @parma {ResourceTypes} [args.queryString] The query string, if additional parameters are desired. +* @param {object} [args.headers] The optional header values to set for a blob returned wth this SAS. +* @param {string} [args.headers.CacheControl] The value of the Cache-Control response header to be returned when this SAS is used. +* @param {string} [args.headers.ContentType] The value of the Content-Type response header to be returned when this SAS is used. +* @param {string} [args.headers.ContentEncoding] The value of the Content-Encoding response header to be returned when this SAS is used. +* @param {string} [args.headers.ContentLanguage] The value of the Content-Language response header to be returned when this SAS is used. +* @param {string} [args.headers.ContentDisposition] The value of the Content-Disposition response header to be returned when this SAS is used. +* @return {object} The shared access signature query string. 
+*/ +SharedKey.prototype.generateSignedQueryString = function (serviceType, path, sharedAccessPolicy, sasVersion, args) { + var addIfNotNull = function (queryString, name, value) { + if (!azureutil.objectIsNull(name) && !azureutil.objectIsNull(value)) { + queryString[name] = value; + } + }; + + var validateVersion = function (sasVersion) { + // validate and add version + if (azureutil.objectIsNull(sasVersion)) { + return HeaderConstants.TARGET_STORAGE_VERSION; + } else { + var values = _.values(CompatibleVersionConstants); + if (values.some(function(version) { + return version.toLowerCase() === sasVersion.toLowerCase(); + })) { + return sasVersion; + } else { + throw new ArgumentError('sasVersion', azureutil.stringFormat(SR.INVALID_SAS_VERSION, sasVersion, values)); + } + } + }; + + var formatAccessPolicyDates = function (accessPolicy) { + if (!azureutil.objectIsNull(accessPolicy.Start)) { + if (!_.isDate(accessPolicy.Start)) { + accessPolicy.Start = new Date(accessPolicy.Start); + } + + accessPolicy.Start = azureutil.truncatedISO8061Date(accessPolicy.Start); + } + + if (!azureutil.objectIsNull(accessPolicy.Expiry)) { + if (!_.isDate(accessPolicy.Expiry)) { + accessPolicy.Expiry = new Date(accessPolicy.Expiry); + } + + accessPolicy.Expiry = azureutil.truncatedISO8061Date(accessPolicy.Expiry); + } + }; + + // set up optional args + var queryString; + var resourceType; + var headers; + var tableName; + + if(args) { + queryString = args.queryString; + resourceType = args.resourceType; + tableName = args.tableName; + headers = args.headers; + } + + if(!queryString) { + queryString = {}; + } + + // add shared access policy params + if (sharedAccessPolicy.AccessPolicy) { + formatAccessPolicyDates(sharedAccessPolicy.AccessPolicy); + + addIfNotNull(queryString, QueryStringConstants.SIGNED_START, sharedAccessPolicy.AccessPolicy.Start); + addIfNotNull(queryString, QueryStringConstants.SIGNED_EXPIRY, sharedAccessPolicy.AccessPolicy.Expiry); + addIfNotNull(queryString, 
QueryStringConstants.SIGNED_PERMISSIONS, sharedAccessPolicy.AccessPolicy.Permissions); + addIfNotNull(queryString, QueryStringConstants.SIGNED_IP, sharedAccessPolicy.AccessPolicy.IPAddressOrRange); + addIfNotNull(queryString, QueryStringConstants.SIGNED_PROTOCOL, sharedAccessPolicy.AccessPolicy.Protocols); + + // tables only + addIfNotNull(queryString, QueryStringConstants.STARTPK, sharedAccessPolicy.AccessPolicy.StartPk); + addIfNotNull(queryString, QueryStringConstants.ENDPK, sharedAccessPolicy.AccessPolicy.EndPk); + addIfNotNull(queryString, QueryStringConstants.STARTRK, sharedAccessPolicy.AccessPolicy.StartRk); + addIfNotNull(queryString, QueryStringConstants.ENDRK, sharedAccessPolicy.AccessPolicy.EndRk); + } + + // validate and add version + var validatedSASVersionString = validateVersion(sasVersion); + addIfNotNull(queryString, QueryStringConstants.SIGNED_VERSION, validatedSASVersionString); + + // add signed identifier + addIfNotNull(queryString, QueryStringConstants.SIGNED_IDENTIFIER, sharedAccessPolicy.Id); + + // blobs only + addIfNotNull(queryString, QueryStringConstants.SIGNED_RESOURCE, resourceType); + if (headers) { + addIfNotNull(queryString, QueryStringConstants.CACHE_CONTROL, headers.cacheControl); + addIfNotNull(queryString, QueryStringConstants.CONTENT_TYPE, headers.contentType); + addIfNotNull(queryString, QueryStringConstants.CONTENT_ENCODING, headers.contentEncoding); + addIfNotNull(queryString, QueryStringConstants.CONTENT_LANGUAGE, headers.contentLanguage); + addIfNotNull(queryString, QueryStringConstants.CONTENT_DISPOSITION, headers.contentDisposition); + } + + // tables only + addIfNotNull(queryString, QueryStringConstants.TABLENAME, tableName); + + // add signature + addIfNotNull(queryString, QueryStringConstants.SIGNATURE, this._generateSignature(serviceType, path, sharedAccessPolicy, validatedSASVersionString, {resourceType: resourceType, headers: headers, tableName: tableName})); + + return qs.stringify(queryString); +}; + +/** +* 
Generates the shared access signature for a resource. +* +* @this {SharedAccessSignature} +* @param {string} serviceType The service type. +* @param {string} path The path to the resource. +* @param {object} sharedAccessPolicy The shared access policy. +* @param {string} [sharedAccessPolicy.Id] The signed identifier. +* @param {SharedAccessPermissions} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type. +* @param {date} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid. +* @param {date} [sharedAccessPolicy.AccessPolicy.Expiry] The time at which the Shared Access Signature becomes expired. +* @param {string} [sharedAccessPolicy.AccessPolicy.IPAddressOrRange] An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive. +* @param {string} [sharedAccessPolicy.AccessPolicy.Protocols] The protocols permitted for a request made with the account SAS. +* Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http. +* @param {string} sasVersion A string indicating the desired SAS Version to use, in storage service version format. Value must be 2012-02-12 or later. +* @parma {ResourceTypes} [args.resourceType] The resource type, if the resource is a blob or container. Null if the resource is a queue or table. +* @parma {ResourceTypes} [args.tableName] The table name, if the resource is a table. Null if the resource is a blob or queue. +* @param {object} [args.headers] The optional header values to set for a blob returned wth this SAS. +* @param {string} [args.headers.CacheControl] The value of the Cache-Control response header to be returned when this SAS is used. +* @param {string} [args.headers.ContentType] The value of the Content-Type response header to be returned when this SAS is used. 
+* @param {string} [args.headers.ContentEncoding] The value of the Content-Encoding response header to be returned when this SAS is used. +* @param {string} [args.headers.ContentLanguage] The value of the Content-Language response header to be returned when this SAS is used. +* @param {string} [args.headers.ContentDisposition] The value of the Content-Disposition response header to be returned when this SAS is used. +* @return {string} The shared access signature. +*/ +SharedKey.prototype._generateSignature = function (serviceType, path, sharedAccessPolicy, sasVersion, args) { + var getvalueToAppend = function (value, noNewLine) { + var returnValue = ''; + if (!azureutil.objectIsNull(value)) { + returnValue = value; + } + + if (noNewLine !== true) { + returnValue += '\n'; + } + + return returnValue; + }; + + // set up optional args + var resourceType; + var tableName; + var headers; + if(args) { + resourceType = args.resourceType; + tableName = args.tableName; + headers = args.headers; + } + + // Add leading slash to path + if (path.substr(0, 1) !== '/') { + path = '/' + path; + } + + var canonicalizedResource; + if (sasVersion === CompatibleVersionConstants.FEBRUARY_2012 || sasVersion === CompatibleVersionConstants.AUGUST_2013) { + // Do not prepend service name for older versions + canonicalizedResource = '/' + this.storageAccount + path; + } else { + canonicalizedResource = '/' + serviceType + '/' + this.storageAccount + path; + } + + var stringToSign = getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Permissions : '') + + getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Start : '') + + getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Expiry : '') + + getvalueToAppend(canonicalizedResource) + + getvalueToAppend(sharedAccessPolicy.Id) + + getvalueToAppend(sharedAccessPolicy.AccessPolicy ? 
sharedAccessPolicy.AccessPolicy.IPAddressOrRange : '') + + getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.Protocols : '') + + sasVersion; + + if(sasVersion == CompatibleVersionConstants.FEBRUARY_2012) { + if(headers) { + throw new ArgumentError('args.headers', SR.INVALID_HEADERS); + } + } else if (resourceType) { + stringToSign += '\n' + + getvalueToAppend(headers ? headers.cacheControl : '') + + getvalueToAppend(headers ? headers.contentDisposition : '') + + getvalueToAppend(headers ? headers.contentEncoding : '') + + getvalueToAppend(headers ? headers.contentLanguage : '') + + getvalueToAppend(headers ? headers.contentType : '', true); + } + + if(tableName) { + stringToSign += '\n' + + getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.StartPk : '') + + getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.StartRk : '') + + getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.EndPk : '') + + getvalueToAppend(sharedAccessPolicy.AccessPolicy ? sharedAccessPolicy.AccessPolicy.EndRk : '', true); + } + + return this.signer.sign(stringToSign); +}; + +module.exports = SharedKey; + +/***/ }), + +/***/ 7266: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +var Constants = __nccwpck_require__(60658); +var HeaderConstants = Constants.HeaderConstants; + +/** +* Creates a new TokenSigner object. +* +* @constructor +* @param {TokenCredential} tokenCredential The token credential, such as containing an OAuth access token. +*/ +function TokenSigner (tokenCredential) { + this.tokenCredential = tokenCredential; +} + +/** +* Signs a request with the Authentication header. +* +* @param {WebResource} webResource The webresource to be signed. +* @param {function(error)} callback The callback function. +*/ +TokenSigner.prototype.signRequest = function (webResource, callback) { + webResource.withHeader(HeaderConstants.AUTHORIZATION, 'Bearer ' + this.tokenCredential.get()); + callback(null); +}; + +module.exports = TokenSigner; + +/***/ }), + +/***/ 36587: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +var util = __nccwpck_require__(73837); +var http = __nccwpck_require__(13685); +var https = __nccwpck_require__(95687); +var EventEmitter = (__nccwpck_require__(82361).EventEmitter); +var os = __nccwpck_require__(22037); + +var azureutil = __nccwpck_require__(3396); +var Logger = __nccwpck_require__(98256); +var Constants = __nccwpck_require__(60658); +var errors = __nccwpck_require__(12528); +var ArgumentError = errors.ArgumentError; + +var DEFAULT_OPERATION_MEMORY_USAGE = Constants.BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES; +var DEFAULT_CRITICAL_MEMORY_LIMITATION_32_IN_BYTES = Constants.BlobConstants.DEFAULT_CRITICAL_MEMORY_LIMITATION_32_IN_BYTES; +var DEFAULT_CRITICAL_MEMORY_LIMITATION_BROWSER_IN_BYTES = Constants.BlobConstants.DEFAULT_CRITICAL_MEMORY_LIMITATION_BROWSER_IN_BYTES; +var DEFAULT_MINIMUM_MEMORY_USAGE_BROWSER_IN_BYTES = Constants.BlobConstants.DEFAULT_MINIMUM_MEMORY_USAGE_BROWSER_IN_BYTES; +var DEFAULT_GLOBAL_CONCURRENCY = 5; //Default http connection limitation for nodejs + +var SystemTotalMemory = os.totalmem(); +var CriticalFreeMemory = 0.1 * SystemTotalMemory; +var nodeVersion = azureutil.getNodeVersion(); + +/** +* Concurrently execute batch operations and call operation callback randomly or in sequence. +* Random mode is for uploading. +* 1. Fire user callback when the operation is done. +* Sequence mode is for downloading. +* 1. Fire user callback when the operation is done and all previous operations and callback has finished. +* 2. BatchOperation guarantees the user callback is fired one by one. +* 3. The next user callback can't be fired until the current one is completed. 
+*/ +function BatchOperation(name, options) { + if (!options) { + options = {}; + } + + this.name = name; + this.logger = options.logger || new Logger(Logger.LogLevels.INFO); + this.operationMemoryUsage = options.operationMemoryUsage || DEFAULT_OPERATION_MEMORY_USAGE; + this.callbackInOrder = options.callbackInOrder === true; + this.callInOrder = options.callInOrder === true; + this._currentOperationId = this.callbackInOrder ? 1 : -1; + this.concurrency = DEFAULT_GLOBAL_CONCURRENCY; + this.enableReuseSocket = (nodeVersion.major > 0 || nodeVersion.minor >= 10) && options.enableReuseSocket; + + this._emitter = new EventEmitter(); + this._enableComplete = false; + this._ended = false; + this._error = null; + this._paused = false; + + //Total operations count(queued and active and connected) + this._totalOperation = 0; + + //Action operations count(The operations which are connecting to remote or executing callback or queued for executing) + this._activeOperation = 0; + + //Queued operations count(The operations which are connecting to remote or queued for executing) + this._queuedOperation = 0; + + //finished operation should be removed from this array + this._operations = []; +} + +/** +* Operation state +*/ +var OperationState = { + INITED : 'inited', + QUEUED : 'queued', + RUNNING : 'running', + COMPLETE : 'complete', + CALLBACK : 'callback', + ERROR : 'error' +}; + +BatchOperation.OperationState = OperationState; + +/** +* Set batch operation concurrency +*/ +BatchOperation.prototype.setConcurrency = function(concurrency) { + if (concurrency) { + this.concurrency = concurrency; + http.Agent.maxSockets = this.concurrency; + https.Agent.maxSockets = this.concurrency; + } +}; + +/** +* Is the workload heavy and It can be used to determine whether we could queue operations +*/ +BatchOperation.prototype.IsWorkloadHeavy = function() { + //Only support one batch operation for now. 
+ //In order to work with the multiple batch operation, we can use global operation track objects + //BatchOperation acquire a bunch of operation ids from global and allocated ids to RestOperation + //RestOperation start to run in order of id + var sharedRequest = 1; + if(this.enableReuseSocket && !this.callInOrder) { + sharedRequest = 2; + } + return this._activeOperation >= sharedRequest * this.concurrency || this._isLowMemory(); +}; + +/** +* Get the approximate memory usage for batch operation. +*/ +BatchOperation.prototype._getApproximateMemoryUsage = function() { + var currentUsage = azureutil.isBrowser() ? DEFAULT_MINIMUM_MEMORY_USAGE_BROWSER_IN_BYTES : process.memoryUsage().rss; // Currently, we cannot get memory usage in browsers + var futureUsage = this._queuedOperation * this.operationMemoryUsage; + return currentUsage + futureUsage; +}; + +/** +* Return whether in a low memory situation. +*/ +BatchOperation.prototype._isLowMemory = function() { + var approximateMemoryUsage = this._getApproximateMemoryUsage(); + return os.freemem() < CriticalFreeMemory || + (this._activeOperation >= this.concurrency && approximateMemoryUsage > 0.5 * SystemTotalMemory) || + (azureutil.is32() && approximateMemoryUsage > DEFAULT_CRITICAL_MEMORY_LIMITATION_32_IN_BYTES) || + (azureutil.isBrowser() && approximateMemoryUsage > DEFAULT_CRITICAL_MEMORY_LIMITATION_BROWSER_IN_BYTES); +}; + +/** +* Add a operation into batch operation +*/ +BatchOperation.prototype.addOperation = function(operation) { + this._operations.push(operation); + operation.status = OperationState.QUEUED; + operation.operationId = ++this._totalOperation; + this._queuedOperation++; + this.logger.debug(util.format('Add operation %d into batch operation %s. 
Active: %s; Queued: %s', operation.operationId, this.name, this._activeOperation, this._queuedOperation)); + //Immediately start the idle operation if workload isn't heavy + this._runOperation(operation); + return this.IsWorkloadHeavy(); +}; + +/** +* Enable batch operation complete when there is no operation to run. +*/ +BatchOperation.prototype.enableComplete = function() { + this._enableComplete = true; + this.logger.debug(util.format('Enable batch operation %s complete', this.name)); + this._tryEmitEndEvent(); +}; + +/** +* Stop firing user call back +*/ +BatchOperation.prototype.pause = function () { + this._paused = true; +}; + +/** +* Start firing user call back +*/ +BatchOperation.prototype.resume = function () { + if (this._paused) { + this._paused = false; + this._fireOperationUserCallback(); + } +}; + +/** +* Add event listener +*/ +BatchOperation.prototype.on = function(event, listener) { + // only emit end if the batch has completed all operations + if(this._ended && event === 'end') { + listener(); + } else { + this._emitter.on(event, listener); + } +}; + +/** +* Run operation +*/ +BatchOperation.prototype._runOperation = function (operation) { + this.logger.debug(util.format('Operation %d start to run', operation.operationId)); + var cb = this.getBatchOperationCallback(operation); + + if(this._error) { + cb(this._error);//Directly call the callback with previous error. + } else { + operation.run(cb); + } + + this._activeOperation++; +}; + +/** +* Return an general operation call back. +* This callback is used to update the internal status and fire user's callback when operation is complete. +*/ +BatchOperation.prototype.getBatchOperationCallback = function (operation) { + var self = this; + return function (error) { + self._queuedOperation--; + if (error) { + operation.status = OperationState.ERROR; + self.logger.debug(util.format('Operation %d failed. 
Error %s', operation.operationId, error)); + self._error = error; + } else { + operation.status = OperationState.CALLBACK; + self.logger.debug(util.format('Operation %d succeed', operation.operationId)); + } + + operation._callbackArguments = arguments; + if (self._paused) { + operation.status = OperationState.CALLBACK; + self.logger.debug(util.format('Batch operation paused and Operation %d wait for firing callback', operation.operationId)); + } else if (self.callbackInOrder) { + operation.status = OperationState.CALLBACK; + if (self._currentOperationId === operation.operationId) { + self._fireOperationUserCallback(operation); + } else if (self._currentOperationId > operation.operationId) { + throw new Error('Debug error: current callback operation id cannot be larger than operation id'); + } else { + self.logger.debug(util.format('Operation %d is waiting for firing callback %s', operation.operationId, self._currentOperationId)); + } + } else { + self._fireOperationUserCallback(operation); + } + + self._tryEmitDrainEvent(); + operation = null; + self = null; + }; +}; + +/** +* Fire user's call back +*/ +BatchOperation.prototype._fireOperationUserCallback = function (operation) { + var index = this._getCallbackOperationIndex(); + if (!operation && index != -1) { + operation = this._operations[index]; + } + + if (operation && !this._paused) { + // fire the callback, if exists + if (operation._userCallback) { + this.logger.debug(util.format('Fire user call back for operation %d', operation.operationId)); + // make sure UserCallback is a sync operation in sequence mode. + // both async and sync operations are available for random mode. 
+ operation._fireUserCallback(); + } + + // remove the operation from the array and decrement the counter + this._operations.splice(index, 1); + this._activeOperation--; + operation.status = OperationState.COMPLETE; + index = operation = null; + + if (this.callbackInOrder) { + this._currentOperationId++; + } + + this._fireOperationUserCallback(); + } else if (this._paused) { + this._tryEmitDrainEvent(); + } else { + // check if batch has ended and if so emit end event + this._tryEmitEndEvent(); + } +}; + +/** +* Try to emit the BatchOperation end event +* End event means all the operation and callback already finished. +*/ +BatchOperation.prototype._tryEmitEndEvent = function () { + if(this._enableComplete && this._activeOperation === 0 && this._operations.length === 0) { + this._ended = true; + this.logger.debug(util.format('Batch operation %s emits the end event', this.name)); + this._emitter.emit('end', this._error, null); + return true; + } + + // Workaround to recover from the 'hang' edge case. _tryEmitEndEvent function is not supposed to be called if the bacth is not really completed. + this._tryEmitDrainEvent(); + return false; +}; + +/** +* Try to emit the drain event +*/ +BatchOperation.prototype._tryEmitDrainEvent = function () { + if (!this._emitter) return false; + if(!this.IsWorkloadHeavy() || this._activeOperation < this.concurrency) { + this._emitter.emit('drain'); + return true; + } + return false; +}; + +/** +* Get the current active operation index. +* Only the active operation could call user's callback in sequence model. +* The other finished but not active operations should wait for wake up. 
+*/ +BatchOperation.prototype._getCallbackOperationIndex = function () { + var operation = null; + for (var i = 0; i < this._operations.length; i++) { + operation = this._operations[i]; + if (this.callbackInOrder) { + //Sequence mode + if (operation.operationId == this._currentOperationId) { + if (operation.status === OperationState.CALLBACK) { + return i; + } else { + return -1; + } + } + } else { + //Random mode + if (operation.status === OperationState.CALLBACK) { + return i; + } + } + } + return -1; +}; + +/** +* Do nothing and directly call the callback. +* In random mode, the user callback will be called immediately +* In sequence mode, the user callback will be called after the previous callback has been called +*/ +BatchOperation.noOperation = function (cb) { + cb(); +}; + +/** +* Rest operation in sdk +*/ +function RestOperation(serviceClient, operation) { + this.status = OperationState.Inited; + this.operationId = -1; + this._callbackArguments = null; + + // setup callback and args + this._userCallback = arguments[arguments.length - 1]; + var sliceEnd = arguments.length; + if(azureutil.objectIsFunction(this._userCallback)) { + sliceEnd--; + } else { + this._userCallback = null; + } + var operationArguments = Array.prototype.slice.call(arguments).slice(2, sliceEnd); + + this.run = function(cb) { + var func = serviceClient[operation]; + if(!func) { + throw new ArgumentError('operation', util.format('Unknown operation %s in serviceclient', operation)); + } else { + if(!cb) cb = this._userCallback; + operationArguments.push(cb); + this.status = OperationState.RUNNING; + func.apply(serviceClient, operationArguments); + operationArguments = operation = null; + } + }; + + this._fireUserCallback = function () { + if(this._userCallback) { + this._userCallback.apply(null, this._callbackArguments); + } + }; +} + +BatchOperation.RestOperation = RestOperation; + +/** +* Common operation wrapper +*/ +function CommonOperation(operationFunc, callback) { + this.status = 
OperationState.Inited; + this.operationId = -1; + this._callbackArguments = null; + var sliceStart = 2; + if (azureutil.objectIsFunction(callback)) { + this._userCallback = callback; + } else { + this._userCallback = null; + sliceStart = 1; + } + var operationArguments = Array.prototype.slice.call(arguments).slice(sliceStart); + this.run = function (cb) { + if (!cb) cb = this._userCallback; + operationArguments.push(cb); + this.status = OperationState.RUNNING; + operationFunc.apply(null, operationArguments); + operationArguments = operationFunc = null; + }; + + this._fireUserCallback = function () { + if (this._userCallback) { + this._userCallback.apply(null, this._callbackArguments); + } + this._userCallback = this._callbackArguments = null; + }; +} + +BatchOperation.CommonOperation = CommonOperation; + +module.exports = BatchOperation; + + +/***/ }), + +/***/ 63615: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +var stream = __nccwpck_require__(12781); +var util = __nccwpck_require__(73837); + +function BufferStream(buffer, options) { + stream.Readable.call(this, options); + + this._buffer = buffer; + this._offset = 0; + this._chunkSize = 4 * 1024 * 1024; + this._bufferSize = buffer.length; +} + +util.inherits(BufferStream, stream.Readable); + +BufferStream.prototype._read = function () { + while (this.push(this._readNextChunk())) { + continue; + } +}; + +BufferStream.prototype._readNextChunk = function () { + var data = null; + + if (this._offset < this._bufferSize) { + var end = this._offset + this._chunkSize; + end = end > this._bufferSize ? this._bufferSize : end; + data = this._buffer.slice(this._offset, end); + this._offset = end; + } + + return data; +}; + +module.exports = BufferStream; + +/***/ }), + +/***/ 13548: +/***/ ((module) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +/** +* Chunked memory pool allocator. +* It could dramatically reduce the memory usage. +* However, it can't dramatically reduce the CPU time since GC in v8 is very efficient. +*/ +function ChunkAllocator(chunkSize, maxCount) { + // Track the unused buffers and number of used buffers + this._pool = []; + this._inuse = 0; + + // Buffer size + this._chunkSize = chunkSize; + + // If total memory is larger than this._chunkSize * this._maxCount, the buffer pool is not used. 
+ this._maxCount = maxCount || 10; + + // Immediately add a buffer to the pool. + this._extendMemoryPool(); +} + +/** +* Synchronously require a buffer +* Caller should be aware of that the content of buffer is random since the Buffer.fill is Time-consumed opreation. +*/ +ChunkAllocator.prototype.getBuffer = function(size) { + var buffer = this._getBufferFromPool(size); + if (buffer === null) { + // Either the total memory is larger than this._chunkSize * this._maxCount + // Or, the size does not match the chunk size of the pool + buffer = Buffer.alloc(size); + } + + this._inuse++; + return buffer; +}; + +/** +* Get buffer from the current memory pool. +*/ +ChunkAllocator.prototype._getBufferFromPool = function(size) { + // Return null if the given size does not match the chunk size of the buffer pool. + if(size !== this._chunkSize) { + return null; + } + + // Extend the memory pool if it is empty. + if(this._pool.length === 0) { + this._extendMemoryPool(); + } + + // If the pool is not empty, return a buffer. + if(this._pool.length !== 0) { + return this._pool.pop(); + } else { + return null; + } +}; + +/** +* Extend the memory pool if the maximum size has not been reached. +*/ +ChunkAllocator.prototype._extendMemoryPool = function() { + var total = this._pool.length + this._inuse; + + // If the total is larger than the max, do not allocate more memory. + if(total >= this._maxCount) return; + + // Calculate the new number of buffers, equal to the total*2 bounded by 1 and the maxCount + var nextSize = Math.min(total * 2, this._maxCount) || 1; + + // Add more buffers. + var increment = nextSize - total; + for(var i = 0; i < increment; i++) { + var buffer = Buffer.alloc(this._chunkSize); + this._pool.push(buffer); + } +}; + +/** +* Release the buffer. +*/ +ChunkAllocator.prototype.releaseBuffer = function(buffer) { + if(buffer.length !== this._chunkSize) { + // Directly delete the buffer if bufferSize is invalid and wait for GC. 
+ buffer = null; + return; + } + + // Add the buffer to the pool if it is not full, otherwise delete it + if (this._pool.length < this._maxCount) { + this._pool.push(buffer); + } else { + buffer = null; + } + + // Decrement _inuse + this._inuse--; + + // _inuse could be below zero if a buffer is released which was not returned by getBuffer + if(this._inuse < 0) { + this._inuse = 0; + } +}; + +/** +* Destroy ChunkAllocator. +*/ +ChunkAllocator.prototype.destroy = function() { + this._pool = []; + this._inuse = 0; +}; + +module.exports = ChunkAllocator; + + +/***/ }), + +/***/ 40473: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var stream = __nccwpck_require__(12781); +var util = __nccwpck_require__(73837); + +var azureutil = __nccwpck_require__(3396); +var Md5Wrapper = __nccwpck_require__(11007); +var Constants = __nccwpck_require__(60658); +var bufferSize = Constants.BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES; + +/** +* Chunk stream +* 1. Calculate md5 +* 2. Track reading offset +* 3. Work with customize memory allocator +* 4. Buffer data from stream. 
+* @param {object} options stream.Readable options +*/ +function ChunkStream(options) { + stream.Stream.call(this); + this.writable = this.readable = true; + + if (!options) { + options = {}; + } + + this._highWaterMark = options.highWaterMark || bufferSize; + + this._paused = undefined; //True/false is the external status from users. + + this._isStreamOpened = false; + this._offset = 0; + this._allocator = options.allocator; + this._streamEnded = false; + this._md5hash = null; + this._buffer = null; + this._internalBufferSize = 0; + this._outputLengthLimit = 0; + this._md5sum = undefined; + + if (options.calcContentMd5) { + this._md5hash = new Md5Wrapper().createMd5Hash(); + } +} + +util.inherits(ChunkStream, stream.Stream); + +/** +* Set the memory allocator. +*/ +ChunkStream.prototype.setMemoryAllocator = function(allocator) { + this._allocator = allocator; +}; + +/** +* Set the output length. +*/ +ChunkStream.prototype.setOutputLength = function(length) { + if (length) { + this._outputLengthLimit = length; + } +}; + +/** +* Internal stream ended +*/ +ChunkStream.prototype.end = function (chunk, encoding, cb) { + if (typeof chunk === 'function') { + cb = chunk; + chunk = null; + encoding = null; + } else if (typeof encoding === 'function') { + cb = encoding; + encoding = null; + } + + if (chunk) { + this.write(chunk, encoding); + } + + this._streamEnded = true; + this._flushInternalBuffer(); + + if (cb) { + this.once('end', cb); + } + + this.emit('end'); +}; + +ChunkStream.prototype.finish = function () { + this.emit('finish'); + + this.destroy(); +}; + +ChunkStream.prototype.error = function () { + this.emit('error'); + + this.destroy(); +}; + +ChunkStream.prototype.destroy = function () { + this.writable = this.readable = false; + + if (this._allocator && azureutil.objectIsFunction(this._allocator.destroy)) { + this._allocator.destroy(); + } + + this.emit('close'); +}; + +ChunkStream.prototype.stop = function () { + this.destroy(); + this._streamEnded = true; 
+ this.emit('end'); +}; + +/** +* Add event listener +*/ +ChunkStream.prototype.write = function (chunk, encoding) { + if (!this._isStreamOpened) { + this._isStreamOpened = true; + } + + this._buildChunk(chunk, encoding); + + return !this._paused; +}; + +/** +* Buffer the data into a chunk and emit it +*/ +ChunkStream.prototype._buildChunk = function (data) { + if (typeof data == 'string') { + data = Buffer.from(data); + } + var dataSize = data.length; + var dataOffset = 0; + do { + var buffer = null; + var targetSize = this._internalBufferSize + dataSize; + + if (targetSize < this._highWaterMark) { + // add the data to the internal buffer and return as it is not yet full + this._copyToInternalBuffer(data, dataOffset, data.length); + return; + } else if (targetSize == this._highWaterMark){ + var canReleaseInnerStreamBuffer = this._stream && this._stream._allocator && this._stream._allocator.releaseBuffer; + if(this._internalBufferSize === 0 && data.length === this._highWaterMark && !canReleaseInnerStreamBuffer) { + // set the buffer to the data passed in to avoid creating a new buffer + buffer = data; + } else { + // add the data to the internal buffer and pop that buffer + this._copyToInternalBuffer(data, dataOffset, data.length); + buffer = this._popInternalBuffer(); + } + dataSize = 0; + } else { + // add data to the internal buffer until its full, then return it + // set the dataSize parameter so that additional data is not lost + var copySize = this._highWaterMark - this._internalBufferSize; + this._copyToInternalBuffer(data, dataOffset, dataOffset + copySize); + dataSize -= copySize; + dataOffset += copySize; + buffer = this._popInternalBuffer(); + } + this._emitBufferData(buffer); + } while(dataSize > 0); +}; + +/** +* Emit the buffer +*/ +ChunkStream.prototype._emitBufferData = function(buffer) { + var newOffset = this._offset + buffer.length; + var range = { + start : this._offset, + end : newOffset - 1, + size : buffer.length + }; + + this._offset = 
newOffset; + + if (this._outputLengthLimit > 0) { + // When the start postion is larger than the limit, no data will be consumed though there is an event to be emitted. + // So the buffer should not be calculated. + if (range.start <= this._outputLengthLimit) { + if (this._offset > this._outputLengthLimit) { + // Don't use negative end parameter which means the index starting from the end of the buffer + // to be compatible with node 0.8. + buffer = buffer.slice(0, buffer.length - (this._offset - this._outputLengthLimit)); + } + if (this._md5hash) { + this._md5hash.update(buffer); + } + } + } else if (this._md5hash) { + this._md5hash.update(buffer); + } + + this.emit('data', buffer, range); +}; + +/** +* Copy data into internal buffer +*/ +ChunkStream.prototype._copyToInternalBuffer = function(data, start, end) { + if(start === undefined) start = 0; + if(end === undefined) end = data.length; + if (!this._buffer) { + this._buffer = this._allocateNewBuffer(); + this._internalBufferSize = 0; + } + var copied = data.copy(this._buffer, this._internalBufferSize, start, end); + this._internalBufferSize += copied; + + if (this._stream && this._stream._allocator && this._stream._allocator.releaseBuffer) { + this._stream._allocator.releaseBuffer(data); + } + + if(copied != (end - start)) { + throw new Error('Can not copy entire data to buffer'); + } +}; + +/** +* Flush internal buffer +*/ +ChunkStream.prototype._flushInternalBuffer = function() { + var buffer = this._popInternalBuffer(); + if (buffer) { + this._emitBufferData(buffer); + } +}; + +/** +* Pop internal buffer +*/ +ChunkStream.prototype._popInternalBuffer = function () { + var buf = null; + if (!this._buffer || this._internalBufferSize === 0) { + buf = null; + } else if(this._internalBufferSize == this._highWaterMark) { + buf = this._buffer; + } else { + buf = this._buffer.slice(0, this._internalBufferSize); + } + + this._buffer = null; + this._internalBufferSize = 0; + + return buf; +}; + +/** +* Allocate a 
buffer +*/ +ChunkStream.prototype._allocateNewBuffer = function() { + var size = this._highWaterMark; + if(this._allocator && azureutil.objectIsFunction(this._allocator.getBuffer)) { + return this._allocator.getBuffer(size); + } else { + var buffer = Buffer.alloc(size); + return buffer; + } +}; + +/** +* Get file content md5 when read completely. +*/ +ChunkStream.prototype.getContentMd5 = function(encoding) { + if (!encoding) encoding = 'base64'; + if(!this._md5hash) { + throw new Error('Can\'t get content md5, please set the calcContentMd5 option for FileReadStream.'); + } else { + if (this._streamEnded) { + if (!this._md5sum) { + this._md5sum = this._md5hash.digest(encoding); + } + return this._md5sum; + } else { + throw new Error('Stream has not ended.'); + } + } +}; + +/** +* Pause chunk stream +*/ +ChunkStream.prototype.pause = function() { + this._paused = true; +}; + +/** +* Resume read stream +*/ +ChunkStream.prototype.resume = function() { + if (this._paused) { + this._paused = false; + + this.emit('drain'); + } +}; + +module.exports = ChunkStream; + +/***/ }), + +/***/ 18802: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +var ChunkStream = __nccwpck_require__(40473); +var EventEmitter = (__nccwpck_require__(82361).EventEmitter); +var util = __nccwpck_require__(73837); +var azureutil = __nccwpck_require__(3396); + +/** +* Chunk stream +* 1. Calculate md5 +* 2. Track reading offset +* 3. Work with customize memory allocator +* 4. Buffer data from stream. +* @param {object} options stream.Readable options +*/ +function ChunkStreamWithStream(stream, options) { + ChunkStream.call(this, options); + + stream.pause(); // Pause stream and wait for data listener. It's useful for node v0.8 + this._stream = stream; + this._stream.on('end', this.end.bind(this)); // Should catch the end event for node v0.8 +} + +util.inherits(ChunkStreamWithStream, ChunkStream); + +/** +* Add event listener +*/ +ChunkStreamWithStream.prototype.on = function(event, listener) { + if(event === 'end' && this._streamEnded) { + listener(); //Directly call the end event when stream already ended + } else { + EventEmitter.prototype.on.call(this, event, listener); + } + + if (event === 'data') { + if (!this._isStreamOpened) { + this._isStreamOpened = true; + this._stream.on('data', this._buildChunk.bind(this)); + } + if (this._paused === undefined) { + this._stream.resume(); + } + } + + return this; +}; + +/** +* Stop stream from external +*/ +ChunkStreamWithStream.prototype.stop = function (chunk, encoding, cb) { + if (azureutil.objectIsFunction(this._stream.destroy)) { + this._stream.destroy(); + } else { + this.pause(); + } + ChunkStream.prototype.end.call(this, chunk, encoding, cb); +}; + +/** +* Pause chunk stream +*/ +ChunkStreamWithStream.prototype.pause = function () { + ChunkStream.prototype.pause.call(this); + + this._stream.pause(); +}; + +/** +* Resume read stream +*/ +ChunkStreamWithStream.prototype.resume = function() { + ChunkStream.prototype.resume.call(this); + + this._stream.resume(); +}; + +ChunkStreamWithStream.prototype.finish = function () { + ChunkStream.prototype.finish.call(this); + 
this._stream.emit.call(this._stream, 'finish'); +}; + +ChunkStreamWithStream.prototype.destroy = function () { + ChunkStream.prototype.destroy.call(this); + this._stream.emit.call(this._stream, 'close'); +}; + +module.exports = ChunkStreamWithStream; + + +/***/ }), + +/***/ 51477: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var stream = __nccwpck_require__(12781); +var util = __nccwpck_require__(73837); +var fs = __nccwpck_require__(57147); +var validator = __nccwpck_require__(49420); + +var Md5Wrapper = __nccwpck_require__(11007); +var Constants = __nccwpck_require__(60658); +var bufferSize = Constants.BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES; + +var EventEmitter = (__nccwpck_require__(82361).EventEmitter); + +/** +* File read stream +* 1. Calculate md5 +* 2. Track reading offset +* 3. Work with customize memory allocator +* 4. Buffer data from stream. 
+* @param {object} options stream.Readable options +*/ +function FileReadStream(path, options) { + stream.Stream.call(this); + this.readable = true; + + if(!options) { + options = {}; + } + + this._destroyed = false; + this._streamEnded = false; + this._fd = null; + this._fileName = undefined; + this._highWaterMark = options.highWaterMark || bufferSize; + this._offset = 0; + this._paused = undefined; + this._allocator = options.allocator; + this._fileName = path; + + this._md5hash = null; + this._md5sum = undefined; + + if (options.calcContentMd5) { + this._md5hash = new Md5Wrapper().createMd5Hash(); + } + + this._open(); +} + +util.inherits(FileReadStream, stream.Stream); + +/** +* Open file +*/ +FileReadStream.prototype._open = function () { + var flags = 'r'; + var self = this; + fs.open(this._fileName, flags, function(error, fd) { + if (error) { + self.emit('error', error); + } else { + self._fd = fd; + self.emit('open', fd); + } + }); +}; + +/** +* Add event listener +*/ +FileReadStream.prototype.on = function(event, listener) { + if (event === 'data' && this._paused === undefined) { + this._paused = false; + this._emitData(); + } + + return EventEmitter.prototype.on.call(this, event, listener); +}; + +/** +* Set memory allocator +*/ +FileReadStream.prototype.setMemoryAllocator = function(allocator) { + this._allocator = allocator; +}; + +/** +* Get buffer +*/ +FileReadStream.prototype._getBuffer = function(size) { + if(this._allocator && this._allocator.getBuffer) { + return this._allocator.getBuffer(size); + } else { + var buffer = Buffer.alloc(size); + return buffer; + } +}; + +/** +* Release buffer +*/ +FileReadStream.prototype._releaseBuffer = function(buffer) { + if(this._allocator && this._allocator.releaseBuffer) { + this._allocator.releaseBuffer(buffer); + } +}; + +/** +* Emit the data from file +*/ +FileReadStream.prototype._emitData = function() { + var self = this; + if(!this._fd) { + this.once('open', function() { + self._emitData(); + }); + 
return; + } + + if (this._paused || this._streamEnded) { + return; + } + var buffer = this._getBuffer(this._highWaterMark); + fs.read(this._fd, buffer, 0, this._highWaterMark, this._offset, function(error, bytesRead, readBuffer) { + if (error) { + self.emit('error', error); + return; + } + + if(bytesRead === 0) { + if(!self._streamEnded) { + self._streamEnded = true; + self.emit('end'); + } + return; + } + + var range = { + start : self._offset, + end : self._offset + bytesRead - 1, + size : bytesRead + }; + + var data; + if(bytesRead == self._highWaterMark) { + data = readBuffer; + } else { + data = readBuffer.slice(0, bytesRead); + //Release the current buffer since we created a new one + self._releaseBuffer(readBuffer); + } + + if(self._md5hash) { + self._md5hash.update(data); + } + + self.emit('data', data, range); + + // cleanup + self._offset += bytesRead; + buffer = readBuffer = data = null; + self._emitData(); + }); +}; + +/** +* Get file content md5 when read completely. +*/ +FileReadStream.prototype.getContentMd5 = function(encoding) { + if (!encoding) encoding = 'base64'; + if(!this._md5hash) { + throw new Error('Can\'t get content md5, please set the calcContentMd5 option for FileReadStream.'); + } else { + if (this._streamEnded) { + if (!this._md5sum) { + this._md5sum = this._md5hash.digest(encoding); + } + return this._md5sum; + } else { + throw new Error('FileReadStream has not ended.'); + } + } +}; + +/** +* Pause chunk stream +*/ +FileReadStream.prototype.pause = function() { + this._paused = true; +}; + +/** +* Resume read stream +*/ +FileReadStream.prototype.resume = function() { + var previousState = this._paused; + if (this._paused) { + this._paused = false; + + if(previousState === true) { + //Only start to emit data when it's in pause state + this._emitData(); + } + } +}; + +FileReadStream.prototype.finish = function () { + this.destroy(); +}; + +FileReadStream.prototype.destroy = function () { + if (this._destroyed) { + return; + } + + var 
self = this; + this.readable = false; + + function close(fd) { + fs.close(fd || self._fd, function(err) { + if (err) { + self.emit('error', err); + } + else { + self.emit('close'); + } + }); + self._fd = null; + self._destroyed = true; + } + + // when the stream is closed immediately after creating it + if (!validator.isInt('' + this._fd)) { + this.once('open', close); + return; + } + + close(); +}; + +FileReadStream.prototype.stop = function () { + this.destroy(); + this._streamEnded = true; + this.emit('end'); +}; + +module.exports = FileReadStream; + + +/***/ }), + +/***/ 98664: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +var azureCommon = __nccwpck_require__(82187); +var azureutil = azureCommon.util; +var Constants = __nccwpck_require__(60658); +var EventEmitter = (__nccwpck_require__(82361).EventEmitter); + +/** +* Range stream +*/ +function RangeStream(serviceClient, container, blob, options) { + this.serviceClient = serviceClient; + this._emitter = new EventEmitter(); + this._paused = false; + this._emittedAll = false; + this._emittedRangeIndex = 0; + this._rangelist = []; + this._resourcePath = []; + this._isEmitting = false; + this._rangeStreamEnded = false; + this._lengthHeader = Constants.HeaderConstants.CONTENT_LENGTH; + this._minRangeSize = Constants.BlobConstants.MIN_WRITE_PAGE_SIZE_IN_BYTES; + this._maxRangeSize = Constants.BlobConstants.DEFAULT_WRITE_PAGE_SIZE_IN_BYTES; + if (options.rangeStart) { + this._startOffset = options.rangeStart; + } else { + this._startOffset = 0; + } + this._dataOffset = this._startOffset; + if (options.rangeEnd) { + this._endOffset = options.rangeEnd; + } else { + this._endOffset = Number.MAX_VALUE; + } + if (container) { + this._resourcePath.push(container); + } + if (blob) { + this._resourcePath.push(blob); + } +} + +/** +* Get range list +*/ +RangeStream.prototype.list = function (options, callback) { + var start = this._startOffset; + var end; + var singleRangeSize = Constants.BlobConstants.MAX_SINGLE_GET_PAGE_RANGE_SIZE; + + if (this._listFunc === undefined) { + // the default function puts the whole blob into a single list item + this._listFunc = this._defaultListFunc; + end = this._endOffset; + } else { + end = Math.min(this._startOffset + singleRangeSize - 1, this._endOffset); + } + options.rangeStart = start; + if (end != Number.MAX_VALUE) { + options.rangeEnd = end; + } + + var self = this; + var onList = function (error, ranges, response) { + if (error) { + callback(error); + } else { + if (self._rangeStreamEnded) { + return; + } + + var totalSize = parseInt(response.headers[self._lengthHeader], 10); + var endOffset = 
Math.min(totalSize - 1, self._endOffset); + var rangeEnd = Math.min(end, endOffset); + + if (!ranges.length) { + // convert single object to range + // start >= end means there is no valid regions + ranges.push({ start : start, end : rangeEnd, dataSize: 0 }); + } else if (ranges[ranges.length - 1].end !== rangeEnd) { + // don't forget the zero chunk at the end of range + ranges.push({ start : ranges[ranges.length - 1].end + 1, end : rangeEnd, dataSize: 0 }); + } + + if (end >= endOffset) { + self._rangeStreamEnded = true; + } + self.resizeAndSaveRanges(ranges); + self._startOffset += singleRangeSize; + self._emitRange(); + + // This is only valid when listing pages because when listing with the default function, the "endOffset" will always equal to or greater than the "end". + if (end < endOffset && !self._rangeStreamEnded) { + process.nextTick(function () { + ranges = null; + self.list(options, callback); + self = null; + }); + } + } + }; + + var callArguments = Array.prototype.slice.call(this._resourcePath); + callArguments.push(options); + callArguments.push(onList); + this._listFunc.apply(this.serviceClient, callArguments); +}; + +/** +* Resize regions: +* 1. Merge small pieces into a range no less than this._minRangeSize +* 2. 
Split large pieces into ranges no more than this._maxRangeSize +*/ +RangeStream.prototype.resizeAndSaveRanges = function (ranges) { + var rangeList = this._rangelist; + var holdingRange = { type : 'range', size : 0, dataSize : 0, start : this._startOffset, end : -1 }; + var readingRange = null; + var rangeSize = 0; + + for (var index = 0; index < ranges.length; index++) { + readingRange = ranges[index]; + rangeSize = readingRange.end - holdingRange.start + 1; + + if (rangeSize < this._minRangeSize) { + // merge fragment ranges + this.mergeRanges(holdingRange, readingRange); + } else { + if (holdingRange.end != -1) { + // save the holding range list and hold the reading range + this.splitAndSaveRanges(holdingRange, rangeList); + holdingRange = readingRange; + } + + if (this._dataOffset != readingRange.start) { + // padding zero for empty range and hold the reading range + this.putZeroRange(this._dataOffset, readingRange.start - 1, rangeList); + holdingRange = readingRange; + } else if (holdingRange.end == -1) { + // if holdingRange is never set, it means readingRange exceeds MIN_WRITE_FILE_SIZE_IN_BYTES + this.splitAndSaveRanges(readingRange, rangeList); + // reading range has been saved, offset the holding start position for calculating the range size in next loop + holdingRange.start = readingRange.end + 1; + } + } + + // If it is the last range, put the holding range into list anyway + if (index == ranges.length - 1 && holdingRange.end > holdingRange.start) { + this.splitAndSaveRanges(holdingRange, rangeList); + } + + this._dataOffset = readingRange.end + 1; + } +}; + +/** +* Put a zero range into range list +*/ +RangeStream.prototype.putZeroRange = function (startOffset, endOffset, rangeList) { + var zeroDataRange = { type : 'range', size : -1, dataSize : 0, start : startOffset, end : endOffset }; + this.splitAndSaveRanges(zeroDataRange, rangeList); +}; + +/** +* Merge small ranges +*/ +RangeStream.prototype.mergeRanges = function (holdingRange, readingRange) { 
+ holdingRange.size = readingRange.end - holdingRange.start + 1; + holdingRange.dataSize += readingRange.dataSize; + holdingRange.end = readingRange.end; + return holdingRange; +}; + +/** +* Split range into small pieces with maximum _maxRangeSize and minimum _minRangeSize size. +* For example, [0, 10G - 1] => [0, 4MB - 1], [4MB, 8MB - 1] ... [10GB - 4MB, 10GB - 1] +*/ +RangeStream.prototype.splitAndSaveRanges = function (range, rangeList) { + var rangeSize = range.end - range.start + 1; + var offset = range.start; + var limitedSize = 0; + + while (rangeSize > 0) { + var newRange = { type : 'range', size : 0, dataSize : 0, start : -1, end : -1 }; + limitedSize = Math.min(rangeSize, this._maxRangeSize); + newRange.start = offset; + newRange.size = limitedSize; + if (range.dataSize === 0) { + newRange.dataSize = 0; + } else { + newRange.dataSize = limitedSize; + } + offset += limitedSize; + newRange.end = offset - 1; + rangeList.push(newRange); + rangeSize -= limitedSize; + } +}; + +/** +* Emit a range +*/ +RangeStream.prototype._emitRange = function () { + if (this._paused || this._emittedAll || this._isEmitting) return; + this._isEmitting = true; + try { + for (; this._emittedRangeIndex < this._rangelist.length; this._emittedRangeIndex++) { + if (this._paused) { + return; + } + var range = this._rangelist[this._emittedRangeIndex]; + this._emitter.emit('range', range); + this._rangelist[this._emittedRangeIndex] = null; + } + + if (this._rangeStreamEnded) { + this._rangelist = null; + this._emittedAll = true; + this._emitter.emit('end'); + } + } finally { + this._isEmitting = false; + } +}; + +/** +* The Default list function which puts the whole blob into one range. 
+*/ +RangeStream.prototype._defaultListFunc = function (container, blob, optionsOrCallback, callback) { + var options; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; }); + + this.getBlobProperties(container, blob, options, function (error, result, response) { + if (error) { + callback(error); + } else { + var range = [{}]; + range[0].start = options.rangeStart ? Math.max(options.rangeStart, 0) : 0; + range[0].end = options.rangeEnd ? Math.min(options.rangeEnd, result.contentLength - 1) : result.contentLength - 1; + range[0].size = range[0].end - range[0].start + 1; + range[0].dataSize = range[0].size; + callback(error, range, response); + } + }); +}; + +/** +* Add event listener +*/ +RangeStream.prototype.on = function (event, listener) { + this._emitter.on(event, listener); +}; + +/** +* Pause the stream +*/ +RangeStream.prototype.pause = function () { + this._paused = true; +}; + +/** +* Resume the stream +*/ +RangeStream.prototype.resume = function () { + this._paused = false; + if (!this._isEmitting) { + this._emitRange(); + } +}; + +/** +* Stop the stream +*/ +RangeStream.prototype.stop = function () { + this.pause(); + this._emittedAll = true; + this._emitter.emit('end'); +}; + +module.exports = RangeStream; + + +/***/ }), + +/***/ 39201: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var EventEmitter = __nccwpck_require__(82361); +var util = __nccwpck_require__(73837); +var azureutil = __nccwpck_require__(3396); + +/** +* Blob upload/download speed summary. +* Trigger 'progress' event every progress updates. +*/ +function SpeedSummary (name) { + this.name = name; + this._startTime = Date.now(); + this._timeWindowInSeconds = 10; + this._timeWindow = this._timeWindowInSeconds * 1000; + this._totalWindowSize = 0; + this._speedTracks = new Array(this._timeWindowInSeconds); + this._speedTrackPtr = 0; + this.totalSize = undefined; + this.completeSize = 0; +} + +util.inherits(SpeedSummary, EventEmitter); + +/** +* Convert the size to human readable size +*/ +function toHumanReadableSize(size, len) { + if(!size) return '0B'; + if (!len || len <= 0) { + len = 2; + } + var units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']; + var i = Math.floor( Math.log(size) / Math.log(1024)); + return (size/Math.pow(1024, i)).toFixed(len) + units[i]; +} + +/** +* Get running seconds +*/ +SpeedSummary.prototype.getElapsedSeconds = function(humanReadable) { + var now = Date.now(); + var seconds = parseInt((now - this._startTime) / 1000, 10); + if (humanReadable !== false) { + var s = parseInt(seconds % 60, 10); + seconds /= 60; + var m = Math.floor(seconds % 60); + seconds /= 60; + var h = Math.floor(seconds); + seconds = util.format('%s:%s:%s', azureutil.zeroPaddingString(h, 2), azureutil.zeroPaddingString(m, 2), azureutil.zeroPaddingString(s, 2)); + } + return seconds; +}; + +/** +* Get complete percentage +* @param {int} len The number of digits after the decimal point. 
+*/ +SpeedSummary.prototype.getCompletePercent = function(len) { + if (this.totalSize) { + if(!len || len <= 0) { + len = 1; + } + return (this.completeSize * 100 / this.totalSize).toFixed(len); + } else { + if(this.totalSize === 0) { + return 100; + } else { + return 0; + } + } +}; + +/** +* Get average upload/download speed +*/ +SpeedSummary.prototype.getAverageSpeed = function(humanReadable) { + var elapsedTime = this.getElapsedSeconds(false); + return this._getInternalSpeed(this.completeSize, elapsedTime, humanReadable); +}; + +/** +* Get instant speed +*/ +SpeedSummary.prototype.getSpeed = function(humanReadable) { + this._refreshSpeedTracks(); + var elapsedTime = this.getElapsedSeconds(false); + elapsedTime = Math.min(elapsedTime, this._timeWindowInSeconds); + return this._getInternalSpeed(this._totalWindowSize, elapsedTime, humanReadable); +}; + +/** +* Get internal speed +*/ +SpeedSummary.prototype._getInternalSpeed = function(totalSize, elapsedTime, humanReadable) { + if (elapsedTime <= 0) { + elapsedTime = 1; + } + var speed = totalSize / elapsedTime; + if(humanReadable !== false) { + speed = toHumanReadableSize(speed) + '/s'; + } + return speed; +}; + +/** +* Refresh speed tracks +*/ +SpeedSummary.prototype._refreshSpeedTracks = function() { + var now = Date.now(); + var totalSize = 0; + for(var i = 0; i < this._speedTracks.length; i++) { + if(!this._speedTracks[i]) continue; + if(now - this._speedTracks[i].timeStamp <= this._timeWindow) { + totalSize += this._speedTracks[i].size; + } else { + this._speedTracks[i] = null; + } + } + this._totalWindowSize = totalSize; +}; + +/** +* Increment the complete data size +*/ +SpeedSummary.prototype.increment = function(len) { + this.completeSize += len; + this._recordSpeed(len); + + var that = this; + process.nextTick(function () { + that.emit('progress'); + }); + + return this.completeSize; +}; + +/** +* record complete size into speed tracks +*/ +SpeedSummary.prototype._recordSpeed = function(completeSize) { + 
var now = Date.now(); + var track = this._speedTracks[this._speedTrackPtr]; + if(track) { + var timeDiff = now - track.timeStamp; + if(timeDiff > this._timeWindow) { + track.timeStamp = now; + track.size = completeSize; + } else if(timeDiff <= 1000) { //1 seconds + track.size += completeSize; + } else { + this._speedTrackPtr = (this._speedTrackPtr + 1) % this._timeWindowInSeconds; + this._recordSpeed(completeSize); + } + } else { + track = {timeStamp : now, size: completeSize}; + this._speedTracks[this._speedTrackPtr] = track; + } +}; + +/** +* Get auto increment function +*/ +SpeedSummary.prototype.getAutoIncrementFunction = function(size) { + var self = this; + return function(error, retValue) { + if(!error) { + var doneSize = 0; + if((!retValue && retValue !== 0) || isNaN(retValue)) { + doneSize = size; + } else { + doneSize = retValue; + } + self.increment(doneSize); + } + }; +}; + +/** +* Get total size +*/ +SpeedSummary.prototype.getTotalSize = function(humanReadable) { + if (humanReadable !== false) { + return toHumanReadableSize(this.totalSize); + } else { + return this.totalSize; + } +}; + +/** +* Get completed data size +*/ +SpeedSummary.prototype.getCompleteSize = function(humanReadable) { + if (humanReadable !== false) { + return toHumanReadableSize(this.completeSize); + } else { + return this.completeSize; + } +}; + +module.exports = SpeedSummary; + + +/***/ }), + +/***/ 60913: +/***/ ((module, exports) => { + +"use strict"; +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Expose 'AccessCondition'. + +/** +* Defines constants, enums, and utility functions for use with storage access condition. +* @namespace +*/ + + + +exports = module.exports; + +/** +* Constructs an empty access condition. +* +* @return {object} An empty AccessCondition object +*/ +exports.generateEmptyCondition = function () { + return {}; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource does not exist on the service +* +* Setting this access condition modifies the request to include the HTTP If-None-Match conditional header + +* @return {AccessConditions} An AccessCondition object that represents a condition that checks for nonexistence +*/ +exports.generateIfNotExistsCondition = function () { + var accessCondition = {}; + accessCondition.EtagNonMatch = '*'; + return accessCondition; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource exists on the service +* +* Setting this access condition modifies the request to include the HTTP If-Match conditional header + +* @return {AccessConditions} An AccessCondition object that represents a condition that checks for existence +*/ +exports.generateIfExistsCondition = function () { + var accessCondition = {}; + accessCondition.EtagMatch = '*'; + return accessCondition; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource's ETag value +* does not match the specified ETag value +* +* Setting this access condition modifies the request to include the HTTP If-None-Match conditional header +* +* @param {string} etag The ETag value to check against the resource's ETag +* @return {AccessConditions} An AccessCondition object that represents the If-None-Match condition +*/ +exports.generateIfNoneMatchCondition = function (etag) { + var 
accessCondition = {}; + accessCondition.EtagNonMatch = etag; + return accessCondition; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource's ETag value +* matches the specified ETag value +* +* Setting this access condition modifies the request to include the HTTP If-Match conditional header +* +* @param {string} etag The ETag value to check against the resource's ETag +* @return {AccessConditions} An AccessCondition object that represents the If-Match condition +*/ +exports.generateIfMatchCondition = function (etag) { + var accessCondition = {}; + accessCondition.EtagMatch = etag; + return accessCondition; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource has been +* modified since the specified time +* +* Setting this access condition modifies the request to include the HTTP If-Modified-Since conditional header +* +* @param {Date|string} time A date object specifying the time since which the resource must have been modified +* @return {AccessConditions} An AccessCondition object that represents the If-Modified-Since condition +*/ +exports.generateIfModifiedSinceCondition = function (time) { + var accessCondition = {}; + accessCondition.DateModifedSince = time; + return accessCondition; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource has not been +* modified since the specified time +* +* Setting this access condition modifies the request to include the HTTP If-Unmodified-Since conditional header +* +* @param {Date|string} time A date object specifying the time since which the resource must have not been modified +* @return {AccessConditions} An AccessCondition object that represents the If-Unmodified-Since condition +*/ +exports.generateIfNotModifiedSinceCondition = function (time) { + var accessCondition = {}; + accessCondition.DateUnModifiedSince = time; + return accessCondition; +}; + +/** +* 
Constructs an access condition such that an operation will be performed only if the resource's sequence number +* is equal to the specified value +* +* Setting this access condition modifies the request to include the HTTP x-ms-if-sequence-number-eq conditional header +* +* @param {Number|string} sequenceNumber A date object specifying the time since which the resource must have not been modified +* @return {AccessConditions} An AccessCondition object that represents the If-Unmodified-Since condition +*/ +exports.generateSequenceNumberEqualCondition = function (sequenceNumber) { + var accessCondition = {}; + accessCondition.SequenceNumberEqual = sequenceNumber; + return accessCondition; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource's sequence number +* is less than the specified value +* +* Setting this access condition modifies the request to include the HTTP x-ms-if-sequence-number-lt conditional header +* +* @param {Number|string} sequenceNumber A date object specifying the time since which the resource must have not been modified +* @return {AccessConditions} An AccessCondition object that represents the If-Unmodified-Since condition +*/ +exports.generateSequenceNumberLessThanCondition = function (sequenceNumber) { + var accessCondition = {}; + accessCondition.SequenceNumberLessThan = sequenceNumber; + return accessCondition; +}; + +/** +* Constructs an access condition such that an operation will be performed only if the resource's sequence number +* is less than or equal to the specified value +* +* Setting this access condition modifies the request to include the HTTP x-ms-if-sequence-number-le conditional header +* +* @param {Number|string} sequenceNumber A date object specifying the time since which the resource must have not been modified +* @return {AccessConditions} An AccessCondition object that represents the If-Unmodified-Since condition +*/ 
+exports.generateSequenceNumberLessThanOrEqualCondition = function (sequenceNumber) { + var accessCondition = {}; + accessCondition.SequenceNumberLessThanOrEqual = sequenceNumber; + return accessCondition; +}; + +/***/ }), + +/***/ 60658: +/***/ ((module, exports) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Expose 'Constants'. +exports = module.exports; + +var storageDnsSuffix = process.env.AZURE_STORAGE_DNS_SUFFIX || 'core.windows.net'; + +/** +* Defines constants. +*/ +var Constants = { + /* + * Specifies the value to use for UserAgent header. + * + * @const + * @type {string} + */ + USER_AGENT_PRODUCT_NAME: 'Azure-Storage', + + /* + * Specifies the value to use for UserAgent header. + * + * @const + * @type {string} + */ + USER_AGENT_PRODUCT_VERSION: '2.10.7', + + /** + * The number of default concurrent requests for parallel operation. + * + * @const + * @type {int} + */ + DEFAULT_PARALLEL_OPERATION_THREAD_COUNT: 5, + + /** + * The value of default socket reuse for batch operation. + * + * @const + * @type {boolean} + */ + DEFAULT_ENABLE_REUSE_SOCKET: true, + + /** + * Constant representing a kilobyte (Non-SI version). + * + * @const + * @type {int} + */ + KB: 1024, + + /** + * Constant representing a megabyte (Non-SI version). + * + * @const + * @type {int} + */ + MB: 1024 * 1024, + + /** + * Constant representing a gigabyte (Non-SI version). 
+ * + * @const + * @type {int} + */ + GB: 1024 * 1024 * 1024, + + /** + * Specifies HTTP. + * + * @const + * @type {string} + */ + HTTP: 'http:', + + /** + * Specifies HTTPS. + * + * @const + * @type {string} + */ + HTTPS: 'https:', + + /** + * Default HTTP port. + * + * @const + * @type {int} + */ + DEFAULT_HTTP_PORT: 80, + + /** + * Default HTTPS port. + * + * @const + * @type {int} + */ + DEFAULT_HTTPS_PORT: 443, + + /** + * Default client request timeout in milliseconds. + * Integer containing the number of milliseconds to wait for a server to send response headers (and start the response body) before aborting the request. + * 2 minutes by default. + * + * @const + * @type {int} + */ + DEFAULT_CLIENT_REQUEST_TIMEOUT_IN_MS: 120000, + + /** + * Marker for atom metadata. + * + * @const + * @type {string} + */ + XML_METADATA_MARKER: '$', + + /** + * Marker for atom value. + * + * @const + * @type {string} + */ + XML_VALUE_MARKER: '_', + + /** + * Defines the service types indicators. + * + * @const + * @enum {string} + */ + ServiceType: { + Blob: 'blob', + Queue: 'queue', + Table: 'table', + File: 'file' + }, + + /** + * Specifies the location used to indicate which location the operation can be performed against. + * + * @const + * @enum {int} + */ + RequestLocationMode: { + PRIMARY_ONLY: 0, + SECONDARY_ONLY: 1, + PRIMARY_OR_SECONDARY: 2 + }, + + /** + * Represents a storage service location. + * + * @const + * @enum {int} + */ + StorageLocation: { + PRIMARY: 0, + SECONDARY: 1 + }, + + /** + * Defines constants for use with account SAS. + */ + AccountSasConstants:{ + /** + * Permission types. + * + * @const + * @enum {string} + */ + Permissions: { + READ: 'r', + ADD: 'a', + CREATE: 'c', + UPDATE: 'u', + PROCESS: 'p', + WRITE: 'w', + DELETE: 'd', + LIST: 'l' + }, + + /** + * Services types. + * + * @const + * @enum {string} + */ + Services: { + BLOB: 'b', + FILE: 'f', + QUEUE: 'q', + TABLE: 't' + }, + + /** + * Resources types. 
+ * + * @const + * @enum {string} + */ + Resources: { + SERVICE: 's', + CONTAINER: 'c', + OBJECT: 'o' + }, + + /** + * Protocols types. + * + * @const + * @enum {string} + */ + Protocols: { + HTTPSONLY: 'https', + HTTPSORHTTP: 'https,http' + } + }, + + /** + * Defines constants for use with shared access policies. + */ + AclConstants: { + /** + * XML element for an access policy. + * + * @const + * @type {string} + */ + ACCESS_POLICY: 'AccessPolicy', + + /** + * XML element for the end time of an access policy. + * + * @const + * @type {string} + */ + EXPIRY: 'Expiry', + + /** + * XML attribute for IDs. + * + * @const + * @type {string} + */ + ID: 'Id', + + /** + * XML element for the permission of an access policy. + * + * @const + * @type {string} + */ + PERMISSION: 'Permission', + + /** + * XML element for a signed identifier. + * + * @const + * @type {string} + */ + SIGNED_IDENTIFIER_ELEMENT: 'SignedIdentifier', + + /** + * XML element for signed identifiers. + * + * @const + * @type {string} + */ + SIGNED_IDENTIFIERS_ELEMENT: 'SignedIdentifiers', + + /** + * XML element for the start time of an access policy. + * + * @const + * @type {string} + */ + START: 'Start' + }, + + /** + * Defines constants for use with service properties. + */ + ServicePropertiesConstants: { + /** + * XML element for storage service properties. + * + * @const + * @type {string} + */ + STORAGE_SERVICE_PROPERTIES_ELEMENT: 'StorageServiceProperties', + + /** + * Default analytics version to send for logging, hour metrics and minute metrics. + * + * @const + * @type {string} + */ + DEFAULT_ANALYTICS_VERSION: '1.0', + + /** + * XML element for logging. + * + * @const + * @type {string} + */ + LOGGING_ELEMENT: 'Logging', + + /** + * XML element for version. + * + * @const + * @type {string} + */ + VERSION_ELEMENT: 'Version', + + /** + * XML element for delete. + * + * @const + * @type {string} + */ + DELETE_ELEMENT: 'Delete', + + /** + * XML element for read. 
+ * + * @const + * @type {string} + */ + READ_ELEMENT: 'Read', + + /** + * XML element for write. + * + * @const + * @type {string} + */ + WRITE_ELEMENT: 'Write', + + /** + * XML element for retention policy. + * + * @const + * @type {string} + */ + RETENTION_POLICY_ELEMENT: 'RetentionPolicy', + + /** + * XML element for enabled. + * + * @const + * @type {string} + */ + ENABLED_ELEMENT: 'Enabled', + + /** + * XML element for days. + * + * @const + * @type {string} + */ + DAYS_ELEMENT: 'Days', + + /** + * XML element for HourMetrics. + * + * @const + * @type {string} + */ + HOUR_METRICS_ELEMENT: 'HourMetrics', + + /** + * XML element for MinuteMetrics. + * + * @const + * @type {string} + */ + MINUTE_METRICS_ELEMENT: 'MinuteMetrics', + + /** + * XML element for Cors. + * + * @const + * @type {string} + */ + CORS_ELEMENT: 'Cors', + + /** + * XML element for CorsRule. + * + * @const + * @type {string} + */ + CORS_RULE_ELEMENT: 'CorsRule', + + /** + * XML element for AllowedOrigins. + * + * @const + * @type {string} + */ + ALLOWED_ORIGINS_ELEMENT: 'AllowedOrigins', + + /** + * XML element for AllowedMethods. + * + * @const + * @type {string} + */ + ALLOWED_METHODS_ELEMENT: 'AllowedMethods', + + /** + * XML element for MaxAgeInSeconds. + * + * @const + * @type {string} + */ + MAX_AGE_IN_SECONDS_ELEMENT: 'MaxAgeInSeconds', + + /** + * XML element for ExposedHeaders. + * + * @const + * @type {string} + */ + EXPOSED_HEADERS_ELEMENT: 'ExposedHeaders', + + /** + * XML element for AllowedHeaders. + * + * @const + * @type {string} + */ + ALLOWED_HEADERS_ELEMENT: 'AllowedHeaders', + + /** + * XML element for IncludeAPIs. + * + * @const + * @type {string} + */ + INCLUDE_APIS_ELEMENT: 'IncludeAPIs', + + /** + * XML element for DefaultServiceVersion. + * + * @const + * @type {string} + */ + DEFAULT_SERVICE_VERSION_ELEMENT: 'DefaultServiceVersion', + + /** + * XML element for DeleteRetentionPolicy. 
+ * + * @const + * @type {string} + */ + DEFAULT_DELETE_RETENTION_POLICY_ELEMENT: 'DeleteRetentionPolicy', + + /** + * XML element for StaticWebsite. + * + * @const + * @type {string} + */ + DEFAULT_STATIC_WEBSITE_ELEMENT: 'StaticWebsite', + + /** + * XML element for StaticWebsite/IndexDocument. + * + * @const + * @type {string} + */ + DEFAULT_INDEX_DOCUMENT_ELEMENT: 'IndexDocument', + + /** + * XML element for StaticWebsite/ErrorDocument404Path. + * + * @const + * @type {string} + */ + DEFAULT_ERROR_DOCUMENT_404_PATH_ELEMENT: 'ErrorDocument404Path' + }, + + /** + * Defines constants for use with blob operations. + */ + BlobConstants: { + /** + * XML element for the latest. + * + * @const + * @type {string} + */ + LATEST_ELEMENT: 'Latest', + + /** + * XML element for uncommitted blocks. + * + * @const + * @type {string} + */ + UNCOMMITTED_ELEMENT: 'Uncommitted', + + /** + * XML element for a block list. + * + * @const + * @type {string} + */ + BLOCK_LIST_ELEMENT: 'BlockList', + + /** + * XML element for committed blocks. + * + * @const + * @type {string} + */ + COMMITTED_ELEMENT: 'Committed', + + /** + * The default write page size, in bytes, used by blob streams. + * + * @const + * @type {int} + */ + DEFAULT_WRITE_PAGE_SIZE_IN_BYTES: 4 * 1024 * 1024, + + /** + * The minimum write page size, in bytes, used by blob streams. + * + * @const + * @type {int} + */ + MIN_WRITE_PAGE_SIZE_IN_BYTES: 2 * 1024 * 1024, + + /** + * The default maximum size, in bytes, of a blob before it must be separated into blocks. + * + * @const + * @type {int} + */ + DEFAULT_SINGLE_BLOB_PUT_THRESHOLD_IN_BYTES: 32 * 1024 * 1024, + + /** + * The default write block size, in bytes, used by blob streams. + * + * @const + * @type {int} + */ + DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES: 4 * 1024 * 1024, + + /** + * The default critical memory limitation in 32bit Node.js environment, in bytes. 
+ * + * @const + * @type {int} + */ + DEFAULT_CRITICAL_MEMORY_LIMITATION_32_IN_BYTES: 800 * 1024 * 1024, + + /** + * The default critical memory limitation in browser environment, in bytes. + * + * @const + * @type {int} + */ + DEFAULT_CRITICAL_MEMORY_LIMITATION_BROWSER_IN_BYTES: 1 * 1024 * 1024 * 1024, + + /** + * The default minimum memory usage in browser environment, in bytes. + * + * @const + * @type {int} + */ + DEFAULT_MINIMUM_MEMORY_USAGE_BROWSER_IN_BYTES: 4 * 1024 * 1024, + + /** + * The maximum size of a single block of block blob. + * + * @const + * @type {int} + */ + MAX_BLOCK_BLOB_BLOCK_SIZE: 100 * 1024 * 1024, + + /** + * The maximum size of a single block of append blob. + * + * @const + * @type {int} + */ + MAX_APPEND_BLOB_BLOCK_SIZE: 4 * 1024 * 1024, + + /** + * The maximum size, in bytes, of a blob before it must be separated into blocks. + * + * @const + * @type {int} + */ + MAX_SINGLE_UPLOAD_BLOB_SIZE_IN_BYTES: 64 * 1024 * 1024, + + /** + * The maximum range get size when requesting for a contentMD5. + * + * @const + * @type {int} + */ + MAX_RANGE_GET_SIZE_WITH_MD5 : 4 * 1024 * 1024, + + /** + * The maximum page range size for a page update operation. + * + * @const + * @type {int} + */ + MAX_UPDATE_PAGE_SIZE : 4 * 1024 * 1024, + + /** + * The maximum buffer size for writing a stream buffer. + * + * @const + * @type {int} + */ + MAX_QUEUED_WRITE_DISK_BUFFER_SIZE : 64 * 1024 * 1024, + + /** + * Max size for single get page range. The max value should be 150MB. + * http://blogs.msdn.com/b/windowsazurestorage/archive/2012/03/26/getting-the-page-ranges-of-a-large-page-blob-in-segments.aspx + * + * @const + * @type {int} + */ + MAX_SINGLE_GET_PAGE_RANGE_SIZE : 37 * 4 * 1024 * 1024, + + /** + * The size of a page, in bytes, in a page blob. + * + * @const + * @type {int} + */ + PAGE_SIZE: 512, + + /** + * Resource types. + * + * @const + * @enum {string} + */ + ResourceTypes: { + CONTAINER: 'c', + BLOB: 'b' + }, + + /** + * List blob types. 
+ * + * @const + * @enum {string} + */ + ListBlobTypes: { + Blob: 'b', + Directory: 'd' + }, + + /** + * Put page write options + * + * @const + * @enum {string} + */ + PageWriteOptions: { + UPDATE: 'update', + CLEAR: 'clear' + }, + + /** + * Blob types + * + * @const + * @enum {string} + */ + BlobTypes: { + BLOCK: 'BlockBlob', + PAGE: 'PageBlob', + APPEND: 'AppendBlob' + }, + + /** + * Blob lease constants + * + * @const + * @enum {string} + */ + LeaseOperation: { + ACQUIRE: 'acquire', + RENEW: 'renew', + CHANGE: 'change', + RELEASE: 'release', + BREAK: 'break' + } + }, + + /** + * Defines constants for use with file operations. + */ + FileConstants: { + /** + * The default write size, in bytes, used by file streams. + * + * @const + * @type {int} + */ + DEFAULT_WRITE_SIZE_IN_BYTES: 4 * 1024 * 1024, + + /** + * The maximum range size when requesting for a contentMD5. + * + * @const + * @type {int} + */ + MAX_RANGE_GET_SIZE_WITH_MD5 : 4 * 1024 * 1024, + + /** + * The maximum range size for a file update operation. + * + * @const + * @type {int} + */ + MAX_UPDATE_FILE_SIZE : 4 * 1024 * 1024, + + /** + * The default minimum size, in bytes, of a file when it must be separated into ranges. + * + * @const + * @type {int} + */ + DEFAULT_SINGLE_FILE_GET_THRESHOLD_IN_BYTES: 32 * 1024 * 1024, + + /** + * The minimum write file size, in bytes, used by file streams. + * + * @const + * @type {int} + */ + MIN_WRITE_FILE_SIZE_IN_BYTES: 2 * 1024 * 1024, + + /** + * Put range write options + * + * @const + * @enum {string} + */ + RangeWriteOptions: { + UPDATE: 'update', + CLEAR: 'clear' + }, + + /** + * Resource types. + * + * @const + * @enum {string} + */ + ResourceTypes: { + SHARE: 's', + FILE: 'f' + } + }, + + /** + * Defines constants for use with queue storage. + */ + QueueConstants: { + /** + * XML element for QueueMessage. + * + * @const + * @type {string} + */ + QUEUE_MESSAGE_ELEMENT: 'QueueMessage', + + /** + * XML element for MessageText. 
+ * + * @const + * @type {string} + */ + MESSAGE_TEXT_ELEMENT: 'MessageText' + }, + + /** + * Defines constants for use with table storage. + */ + TableConstants: { + /** + * The changeset response delimiter. + * + * @const + * @type {string} + */ + CHANGESET_DELIMITER: '--changesetresponse_', + + /** + * The batch response delimiter. + * + * @const + * @type {string} + */ + BATCH_DELIMITER: '--batchresponse_', + + /** + * The next continuation row key token. + * + * @const + * @type {string} + */ + CONTINUATION_NEXT_ROW_KEY: 'x-ms-continuation-nextrowkey', + + /** + * The next continuation partition key token. + * + * @const + * @type {string} + */ + CONTINUATION_NEXT_PARTITION_KEY: 'x-ms-continuation-nextpartitionkey', + + /** + * The next continuation table name token. + * + * @const + * @type {string} + */ + CONTINUATION_NEXT_TABLE_NAME: 'x-ms-continuation-nexttablename', + + /** + * The next row key query string argument. + * + * @const + * @type {string} + */ + NEXT_ROW_KEY: 'NextRowKey', + + /** + * The next partition key query string argument. + * + * @const + * @type {string} + */ + NEXT_PARTITION_KEY: 'NextPartitionKey', + + /** + * The next table name query string argument. + * + * @const + * @type {string} + */ + NEXT_TABLE_NAME: 'NextTableName', + + /** + * Prefix of the odata properties returned in a JSON query. + * + * @const + * @type {string} + */ + ODATA_PREFIX: 'odata.', + + /** + * Constant representing the string following a type annotation in a JSON table query. + * + * @const + * @type {string} + */ + ODATA_TYPE_SUFFIX: '@odata.type', + + /** + * Constant representing the property where the odata metadata elements are stored. + * + * @const + * @type {string} + */ + ODATA_METADATA_MARKER: '.metadata', + + /** + * Constant representing the value for an entity property. + * + * @const + * @type {string} + */ + ODATA_VALUE_MARKER: '_', + + /** + * Constant representing the type for an entity property. 
+ * + * @const + * @type {string} + */ + ODATA_TYPE_MARKER: '$', + + /** + * The value to set the maximum data service version header. + * + * @const + * @type {string} + */ + DEFAULT_DATA_SERVICE_VERSION: '3.0;NetFx', + + /** + * The name of the property that stores the table name. + * + * @const + * @type {string} + */ + TABLE_NAME: 'TableName', + + /** + * The name of the special table used to store tables. + * + * @const + * @type {string} + */ + TABLE_SERVICE_TABLE_NAME: 'Tables', + + /** + * Operations. + * + * @const + * @enum {string} + */ + Operations: { + RETRIEVE: 'RETRIEVE', + INSERT: 'INSERT', + REPLACE: 'REPLACE', + MERGE: 'MERGE', + DELETE: 'DELETE', + INSERT_OR_REPLACE: 'INSERT_OR_REPLACE', + INSERT_OR_MERGE: 'INSERT_OR_MERGE' + } + }, + + /** + * Defines constants for use with HTTP headers. + */ + HeaderConstants: { + /** + * The accept ranges header. + * + * @const + * @type {string} + */ + ACCEPT_RANGES: 'accept_ranges', + + /** + * The content transfer encoding header. + * + * @const + * @type {string} + */ + CONTENT_TRANSFER_ENCODING: 'content-transfer-encoding', + + /** + * The transfer encoding header. + * + * @const + * @type {string} + */ + TRANSFER_ENCODING: 'transfer-encoding', + + /** + * The server header. + * + * @const + * @type {string} + */ + SERVER: 'server', + + /** + * The location header. + * + * @const + * @type {string} + */ + LOCATION: 'location', + + /** + * The Last-Modified header. + * + * @const + * @type {string} + */ + LAST_MODIFIED: 'Last-Modified', + + /** + * The creation time header. + * + * @const + * @type {string} + */ + CREATION_TIME: 'x-ms-creation-time', + + /** + * The data service version. + * + * @const + * @type {string} + */ + DATA_SERVICE_VERSION: 'dataserviceversion', + + /** + * The maximum data service version. + * + * @const + * @type {string} + */ + MAX_DATA_SERVICE_VERSION: 'maxdataserviceversion', + + /** + * The master Windows Azure Storage header prefix. 
+ * + * @const + * @type {string} + */ + PREFIX_FOR_STORAGE: 'x-ms-', + + /** + * The client request Id header. + * + * @const + * @type {string} + */ + CLIENT_REQUEST_ID: 'x-ms-client-request-id', + + /** + * The header that specifies the approximate message count of a queue. + * + * @const + * @type {string} + */ + APPROXIMATE_MESSAGES_COUNT: 'x-ms-approximate-messages-count', + + /** + * The Authorization header. + * + * @const + * @type {string} + */ + AUTHORIZATION: 'authorization', + + /** + * The header that is used to avoid browser cache. + * + * @const + * @type {string} + */ + FORCE_NO_CACHE_IN_BROWSER: '_', + + /** + * The header that specifies public access to blobs. + * + * @const + * @type {string} + */ + BLOB_PUBLIC_ACCESS: 'x-ms-blob-public-access', + + /** + * The header that specifies container immutability policy. + * + * @const + * @type {boolean} + */ + HAS_IMMUTABILITY_POLICY: 'x-ms-has-immutability-policy', + + /** + * The header that specifies container has legal hold. + * + * @const + * @type {boolean} + */ + HAS_LEGAL_HOLD: 'x-ms-has-legal-hold', + + /** + * The header for the blob type. + * + * @const + * @type {string} + */ + BLOB_TYPE: 'x-ms-blob-type', + + /** + * The header for the type. + * + * @const + * @type {string} + */ + TYPE: 'x-ms-type', + + /** + * Specifies the block blob type. + * + * @const + * @type {string} + */ + BLOCK_BLOB: 'blockblob', + + /** + * The CacheControl header. + * + * @const + * @type {string} + */ + CACHE_CONTROL: 'cache-control', + + /** + * The header that specifies blob caching control. + * + * @const + * @type {string} + */ + BLOB_CACHE_CONTROL: 'x-ms-blob-cache-control', + + /** + * The header that specifies caching control. + * + * @const + * @type {string} + */ + FILE_CACHE_CONTROL: 'x-ms-cache-control', + + /** + * The copy status. 
+ * + * @const + * @type {string} + */ + COPY_STATUS: 'x-ms-copy-status', + + /** + * The copy completion time + * + * @const + * @type {string} + */ + COPY_COMPLETION_TIME: 'x-ms-copy-completion-time', + + /** + * The copy status message + * + * @const + * @type {string} + */ + COPY_STATUS_DESCRIPTION: 'x-ms-copy-status-description', + + /** + * The copy identifier. + * + * @const + * @type {string} + */ + COPY_ID: 'x-ms-copy-id', + + /** + * Progress of any copy operation + * + * @const + * @type {string} + */ + COPY_PROGRESS: 'x-ms-copy-progress', + + /** + * The copy action. + * + * @const + * @type {string} + */ + COPY_ACTION: 'x-ms-copy-action', + + /** + * Flag if the blob is incremental copy blob. + * + * @const + * @type {string} + */ + INCREMENTAL_COPY: 'x-ms-incremental-copy', + + /** + * Snapshot time of the last successful incremental copy snapshot for this blob. + * + * @const + * @type {string} + */ + COPY_DESTINATION_SNAPSHOT: 'x-ms-copy-destination-snapshot', + + /** + * The ContentID header. + * + * @const + * @type {string} + */ + CONTENT_ID: 'content-id', + + /** + * The ContentEncoding header. + * + * @const + * @type {string} + */ + CONTENT_ENCODING: 'content-encoding', + + /** + * The header that specifies blob content encoding. + * + * @const + * @type {string} + */ + BLOB_CONTENT_ENCODING: 'x-ms-blob-content-encoding', + + /** + * The header that specifies content encoding. + * + * @const + * @type {string} + */ + FILE_CONTENT_ENCODING: 'x-ms-content-encoding', + + /** + * The ContentLangauge header. + * + * @const + * @type {string} + */ + CONTENT_LANGUAGE: 'content-language', + + /** + * The header that specifies blob content language. + * + * @const + * @type {string} + */ + BLOB_CONTENT_LANGUAGE: 'x-ms-blob-content-language', + + /** + * The header that specifies content language. + * + * @const + * @type {string} + */ + FILE_CONTENT_LANGUAGE: 'x-ms-content-language', + + /** + * The ContentLength header. 
+ * + * @const + * @type {string} + */ + CONTENT_LENGTH: 'content-length', + + /** + * The header that specifies blob content length. + * + * @const + * @type {string} + */ + BLOB_CONTENT_LENGTH: 'x-ms-blob-content-length', + + /** + * The header that specifies content length. + * + * @const + * @type {string} + */ + FILE_CONTENT_LENGTH: 'x-ms-content-length', + + /** + * The ContentDisposition header. + * @const + * @type {string} + */ + CONTENT_DISPOSITION: 'content-disposition', + + /** + * The header that specifies blob content disposition. + * + * @const + * @type {string} + */ + BLOB_CONTENT_DISPOSITION: 'x-ms-blob-content-disposition', + + /** + * The header that specifies content disposition. + * + * @const + * @type {string} + */ + FILE_CONTENT_DISPOSITION: 'x-ms-content-disposition', + + /** + * The ContentMD5 header. + * + * @const + * @type {string} + */ + CONTENT_MD5: 'content-md5', + + /** + * The header that specifies blob content MD5. + * + * @const + * @type {string} + */ + BLOB_CONTENT_MD5: 'x-ms-blob-content-md5', + + /** + * The header that specifies content MD5. + * + * @const + * @type {string} + */ + FILE_CONTENT_MD5: 'x-ms-content-md5', + + /** + * The ContentRange header. + * + * @const + * @type {string} + */ + CONTENT_RANGE: 'cache-range', + + /** + * The ContentType header. + * + * @const + * @type {string} + */ + CONTENT_TYPE: 'content-type', + + /** + * The header that specifies blob content type. + * + * @const + * @type {string} + */ + BLOB_CONTENT_TYPE: 'x-ms-blob-content-type', + + /** + * The header that specifies content type. + * + * @const + * @type {string} + */ + FILE_CONTENT_TYPE: 'x-ms-content-type', + + /** + * The header for copy source. + * + * @const + * @type {string} + */ + COPY_SOURCE: 'x-ms-copy-source', + + /** + * The header that specifies the date. + * + * @const + * @type {string} + */ + DATE: 'date', + + /** + * The header that specifies the date. 
+ * + * @const + * @type {string} + */ + MS_DATE: 'x-ms-date', + + /** + * The header to delete snapshots. + * + * @const + * @type {string} + */ + DELETE_SNAPSHOT: 'x-ms-delete-snapshots', + + /** + * The ETag header. + * + * @const + * @type {string} + */ + ETAG: 'etag', + + /** + * The IfMatch header. + * + * @const + * @type {string} + */ + IF_MATCH: 'if-match', + + /** + * The IfModifiedSince header. + * + * @const + * @type {string} + */ + IF_MODIFIED_SINCE: 'if-modified-since', + + /** + * The IfNoneMatch header. + * + * @const + * @type {string} + */ + IF_NONE_MATCH: 'if-none-match', + + /** + * The IfUnmodifiedSince header. + * + * @const + * @type {string} + */ + IF_UNMODIFIED_SINCE: 'if-unmodified-since', + + /** + * Specifies snapshots are to be included. + * + * @const + * @type {string} + */ + INCLUDE_SNAPSHOTS_VALUE: 'include', + + /** + * Specifies that the content-type is JSON. + * + * @const + * @type {string} + */ + JSON_CONTENT_TYPE_VALUE: 'application/json', + + /** + * The header that specifies storage SKU, also known as account type. + * + * @const + * @type {string} + */ + SKU_NAME: 'x-ms-sku-name', + + /** + * The header that describes the flavour of the storage account, also known as account kind. + * + * @const + * @type {string} + */ + ACCOUNT_KIND: 'x-ms-account-kind', + + /** + * The header that specifies lease ID. + * + * @const + * @type {string} + */ + LEASE_ID: 'x-ms-lease-id', + + /** + * The header that specifies the lease break period. + * + * @const + * @type {string} + */ + LEASE_BREAK_PERIOD: 'x-ms-lease-break-period', + + /** + * The header that specifies the proposed lease identifier. + * + * @const + * @type {string} + */ + PROPOSED_LEASE_ID: 'x-ms-proposed-lease-id', + + /** + * The header that specifies the lease duration. + * + * @const + * @type {string} + */ + LEASE_DURATION: 'x-ms-lease-duration', + + /** + * The header that specifies the source lease ID. 
+ * + * @const + * @type {string} + */ + SOURCE_LEASE_ID: 'x-ms-source-lease-id', + + /** + * The header that specifies lease time. + * + * @const + * @type {string} + */ + LEASE_TIME: 'x-ms-lease-time', + + /** + * The header that specifies lease status. + * + * @const + * @type {string} + */ + LEASE_STATUS: 'x-ms-lease-status', + + /** + * The header that specifies lease state. + * + * @const + * @type {string} + */ + LEASE_STATE: 'x-ms-lease-state', + + /** + * Specifies the page blob type. + * + * @const + * @type {string} + */ + PAGE_BLOB: 'PageBlob', + + /** + * The header that specifies page write mode. + * + * @const + * @type {string} + */ + PAGE_WRITE: 'x-ms-page-write', + + /** + * The header that specifies file range write mode. + * + * @const + * @type {string} + */ + FILE_WRITE: 'x-ms-write', + + /** + * The header that specifies whether the response should include the inserted entity. + * + * @const + * @type {string} + */ + PREFER: 'Prefer', + + /** + * The header value which specifies that the response should include the inserted entity. + * + * @const + * @type {string} + */ + PREFER_CONTENT: 'return-content', + + /** + * The header value which specifies that the response should not include the inserted entity. + * + * @const + * @type {string} + */ + PREFER_NO_CONTENT: 'return-no-content', + + /** + * The header prefix for metadata. + * + * @const + * @type {string} + */ + PREFIX_FOR_STORAGE_METADATA: 'x-ms-meta-', + + /** + * The header prefix for properties. + * + * @const + * @type {string} + */ + PREFIX_FOR_STORAGE_PROPERTIES: 'x-ms-prop-', + + /** + * The Range header. + * + * @const + * @type {string} + */ + RANGE: 'Range', + + /** + * The Source Range header. + * + * @const + * @type {string} + */ + SOURCE_RANGE: 'x-ms-source-range', + + /** + * The header that specifies if the request will populate the ContentMD5 header for range gets. 
+ * + * @const + * @type {string} + */ + RANGE_GET_CONTENT_MD5: 'x-ms-range-get-content-md5', + + /** + * The format string for specifying ranges. + * + * @const + * @type {string} + */ + RANGE_HEADER_FORMAT: 'bytes:%d-%d', + + /** + * The header that indicates the request ID. + * + * @const + * @type {string} + */ + REQUEST_ID: 'x-ms-request-id', + + /** + * The header for specifying the sequence number. + * + * @const + * @type {string} + */ + SEQUENCE_NUMBER: 'x-ms-blob-sequence-number', + + /** + * The header for specifying the If-Sequence-Number-EQ condition. + * + * @const + * @type {string} + */ + SEQUENCE_NUMBER_EQUAL: 'x-ms-if-sequence-number-eq', + + /** + * The header for specifying the If-Sequence-Number-LT condition. + * + * @const + * @type {string} + */ + SEQUENCE_NUMBER_LESS_THAN: 'x-ms-if-sequence-number-lt', + + /** + * The header for specifying the If-Sequence-Number-LE condition. + * + * @const + * @type {string} + */ + SEQUENCE_NUMBER_LESS_THAN_OR_EQUAL: 'x-ms-if-sequence-number-le', + + /** + * The header that specifies sequence number action. + * + * @const + * @type {string} + */ + SEQUENCE_NUMBER_ACTION: 'x-ms-sequence-number-action', + + /** + * The header for the blob content length. + * + * @const + * @type {string} + */ + SIZE: 'x-ms-blob-content-length', + + /** + * The header for snapshots. + * + * @const + * @type {string} + */ + SNAPSHOT: 'x-ms-snapshot', + + /** + * Specifies only snapshots are to be included. + * + * @const + * @type {string} + */ + SNAPSHOTS_ONLY_VALUE: 'only', + + /** + * The header for the If-Match condition. + * + * @const + * @type {string} + */ + SOURCE_IF_MATCH: 'x-ms-source-if-match', + + /** + * The header for the If-Modified-Since condition. + * + * @const + * @type {string} + */ + SOURCE_IF_MODIFIED_SINCE: 'x-ms-source-if-modified-since', + + /** + * The header for the If-None-Match condition. 
+ * + * @const + * @type {string} + */ + SOURCE_IF_NONE_MATCH: 'x-ms-source-if-none-match', + + /** + * The header for the If-Unmodified-Since condition. + * + * @const + * @type {string} + */ + SOURCE_IF_UNMODIFIED_SINCE: 'x-ms-source-if-unmodified-since', + + /** + * The header for data ranges. + * + * @const + * @type {string} + */ + STORAGE_RANGE: 'x-ms-range', + + /** + * The header for storage version. + * + * @const + * @type {string} + */ + STORAGE_VERSION: 'x-ms-version', + + /** + * The current storage version header value. + * + * @const + * @type {string} + */ + TARGET_STORAGE_VERSION: '2018-03-28', + + /** + * The UserAgent header. + * + * @const + * @type {string} + */ + USER_AGENT: 'user-agent', + + /** + * The pop receipt header. + * + * @const + * @type {string} + */ + POP_RECEIPT: 'x-ms-popreceipt', + + /** + * The time next visibile header. + * + * @const + * @type {string} + */ + TIME_NEXT_VISIBLE: 'x-ms-time-next-visible', + + /** + * The approximate message counter header. + * + * @const + * @type {string} + */ + APPROXIMATE_MESSAGE_COUNT: 'x-ms-approximate-message-count', + + /** + * The lease action header. + * + * @const + * @type {string} + */ + LEASE_ACTION: 'x-ms-lease-action', + + /** + * The accept header. + * + * @const + * @type {string} + */ + ACCEPT: 'accept', + + /** + * The accept charset header. + * + * @const + * @type {string} + */ + ACCEPT_CHARSET: 'Accept-Charset', + + /** + * The host header. + * + * @const + * @type {string} + */ + HOST: 'host', + + /** + * The correlation identifier header. + * + * @const + * @type {string} + */ + CORRELATION_ID: 'x-ms-correlation-id', + + /** + * The group identifier header. + * + * @const + * @type {string} + */ + GROUP_ID: 'x-ms-group-id', + + /** + * The share quota header. + * + * @const + * @type {string} + */ + SHARE_QUOTA: 'x-ms-share-quota', + + /** + * The max blob size header. 
+ * + * @const + * @type {string} + */ + BLOB_CONDITION_MAX_SIZE: 'x-ms-blob-condition-maxsize', + + /** + * The append blob position header. + * + * @const + * @type {string} + */ + BLOB_CONDITION_APPEND_POSITION: 'x-ms-blob-condition-appendpos', + + /** + * The append blob append offset header. + * + * @const + * @type {string} + */ + BLOB_APPEND_OFFSET: 'x-ms-blob-append-offset', + + /** + * The append blob committed block header. + * + * @const + * @type {string} + */ + BLOB_COMMITTED_BLOCK_COUNT: 'x-ms-blob-committed-block-count', + + /** + * If the contents of the request have been successfully encrypted using the specified algorithm. + * + * @const + * @type {string} + */ + REQUEST_SERVER_ENCRYPTED: 'x-ms-request-server-encrypted', + + /** + * If the data and application metadata are completely encrypted using the specified algorithm. + * + * @const + * @type {string} + */ + SERVER_ENCRYPTED: 'x-ms-server-encrypted', + + /** + * Header indicates the resulting tier of the blob. + * + * @const + * @type {string} + */ + ACCESS_TIER: 'x-ms-access-tier', + + /** + * This is the datetime of when the last time tier was changed on the blob. + * + * @const + * @type {string} + */ + ACCESS_TIER_CHANGE_TIME: 'x-ms-access-tier-change-time', + + /** + * If the access tier is not explicitly set on the blob, + * the tier is inferred based on its content length + * and this header will be returned with true value. + * + * @const + * @type {string} + */ + ACCESS_TIER_INFERRED: 'x-ms-access-tier-inferred', + + /** + * For BlobStorage accounts, the header is returned if archive tier is set + * and rehydrate operation is pending for the request version is 2017-04-17 or later. + * The valid values are rehydrate-pending-to-hot or rehydrate-pending-to-cool. + * + * @const + * @type {string} + */ + ARCHIVE_STATUS: 'x-ms-archive-status' + }, + + QueryStringConstants: { + + /** + * Query component for SAS API version. 
+ * @const + * @type {string} + */ + API_VERSION: 'api-version', + + /** + * The Comp value. + * + * @const + * @type {string} + */ + COMP: 'comp', + + /** + * The Res Type. + * + * @const + * @type {string} + */ + RESTYPE: 'restype', + + /** + * The copy Id. + * @const + * @type {string} + */ + COPY_ID: 'copyid', + + /** + * The snapshot value. + * + * @const + * @type {string} + */ + SNAPSHOT: 'snapshot', + + /** + * The share snapshot value. + * + * @const + * @type {string} + */ + SHARE_SNAPSHOT: 'sharesnapshot', + + /** + * The previous snapshot value. + * + * @const + * @type {string} + */ + PREV_SNAPSHOT: 'prevsnapshot', + + /** + * The timeout value. + * + * @const + * @type {string} + */ + TIMEOUT: 'timeout', + + /** + * The signed start time query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_START: 'st', + + /** + * The signed expiry time query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_EXPIRY: 'se', + + /** + * The signed resource query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_RESOURCE: 'sr', + + /** + * The signed permissions query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_PERMISSIONS: 'sp', + + /** + * The signed services query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_SERVICES: 'ss', + + /** + * The signed resource types query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_RESOURCE_TYPES: 'srt', + + /** + * The signed IP query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_IP: 'sip', + + /** + * The signed protocol query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_PROTOCOL: 'spr', + + /** + * The signed identifier query string argument for shared access signature. 
+ * + * @const + * @type {string} + */ + SIGNED_IDENTIFIER: 'si', + + /** + * The signature query string argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNATURE: 'sig', + + /** + * The signed version argument for shared access signature. + * + * @const + * @type {string} + */ + SIGNED_VERSION: 'sv', + + /** + * The cache control argument for shared access signature. + * + * @const + * @type {string} + */ + CACHE_CONTROL: 'rscc', + + /** + * The content type argument for shared access signature. + * + * @const + * @type {string} + */ + CONTENT_TYPE: 'rsct', + + /** + * The content encoding argument for shared access signature. + * + * @const + * @type {string} + */ + CONTENT_ENCODING: 'rsce', + + /** + * The content language argument for shared access signature. + * + * @const + * @type {string} + */ + CONTENT_LANGUAGE: 'rscl', + + /** + * The content disposition argument for shared access signature. + * + * @const + * @type {string} + */ + CONTENT_DISPOSITION: 'rscd', + + /** + * The block identifier query string argument for blob service. + * + * @const + * @type {string} + */ + BLOCK_ID: 'blockid', + + /** + * The block list type query string argument for blob service. + * + * @const + * @type {string} + */ + BLOCK_LIST_TYPE: 'blocklisttype', + + /** + * The prefix query string argument for listing operations. + * + * @const + * @type {string} + */ + PREFIX: 'prefix', + + /** + * The marker query string argument for listing operations. + * + * @const + * @type {string} + */ + MARKER: 'marker', + + /** + * The maxresults query string argument for listing operations. + * + * @const + * @type {string} + */ + MAX_RESULTS: 'maxresults', + + /** + * The delimiter query string argument for listing operations. + * + * @const + * @type {string} + */ + DELIMITER: 'delimiter', + + /** + * The include query string argument for listing operations. 
+ * + * @const + * @type {string} + */ + INCLUDE: 'include', + + /** + * The peekonly query string argument for queue service. + * + * @const + * @type {string} + */ + PEEK_ONLY: 'peekonly', + + /** + * The numofmessages query string argument for queue service. + * + * @const + * @type {string} + */ + NUM_OF_MESSAGES: 'numofmessages', + + /** + * The popreceipt query string argument for queue service. + * + * @const + * @type {string} + */ + POP_RECEIPT: 'popreceipt', + + /** + * The visibilitytimeout query string argument for queue service. + * + * @const + * @type {string} + */ + VISIBILITY_TIMEOUT: 'visibilitytimeout', + + /** + * The messagettl query string argument for queue service. + * + * @const + * @type {string} + */ + MESSAGE_TTL: 'messagettl', + + /** + * The select query string argument. + * + * @const + * @type {string} + */ + SELECT: '$select', + + /** + * The filter query string argument. + * + * @const + * @type {string} + */ + FILTER: '$filter', + + /** + * The top query string argument. + * + * @const + * @type {string} + */ + TOP: '$top', + + /** + * The skip query string argument. + * + * @const + * @type {string} + */ + SKIP: '$skip', + + /** + * The next partition key query string argument for table service. + * + * @const + * @type {string} + */ + NEXT_PARTITION_KEY: 'NextPartitionKey', + + /** + * The next row key query string argument for table service. + * + * @const + * @type {string} + */ + NEXT_ROW_KEY: 'NextRowKey', + + /** + * The lock identifier for service bus messages. + * + * @const + * @type {string} + */ + LOCK_ID: 'lockid', + + /** + * The table name for table SAS URI's. + * + * @const + * @type {string} + */ + TABLENAME: 'tn', + + /** + * The starting Partition Key for tableSAS URI's. + * + * @const + * @type {string} + */ + STARTPK: 'spk', + + /** + * The starting Partition Key for tableSAS URI's. + * + * @const + * @type {string} + */ + STARTRK: 'srk', + + /** + * The ending Partition Key for tableSAS URI's. 
+ * + * @const + * @type {string} + */ + ENDPK: 'epk', + + /** + * The ending Partition Key for tableSAS URI's. + * + * @const + * @type {string} + */ + ENDRK: 'erk' + }, + + StorageServiceClientConstants: { + /** + * The default protocol. + * + * @const + * @type {string} + */ + DEFAULT_PROTOCOL: 'https:', + + /* + * Used environment variables. + * + * @const + * @enum {string} + */ + EnvironmentVariables: { + AZURE_STORAGE_ACCOUNT: 'AZURE_STORAGE_ACCOUNT', + AZURE_STORAGE_ACCESS_KEY: 'AZURE_STORAGE_ACCESS_KEY', + AZURE_STORAGE_DNS_SUFFIX: 'AZURE_STORAGE_DNS_SUFFIX', + AZURE_STORAGE_CONNECTION_STRING: 'AZURE_STORAGE_CONNECTION_STRING', + HTTP_PROXY: 'HTTP_PROXY', + HTTPS_PROXY: 'HTTPS_PROXY', + EMULATED: 'EMULATED' + }, + + /** + * Default credentials. + */ + DEVSTORE_STORAGE_ACCOUNT: 'devstoreaccount1', + DEVSTORE_STORAGE_ACCESS_KEY: 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', + + /** + * The development store URI. + * + * @const + * @type {string} + */ + DEV_STORE_URI: 'http://127.0.0.1', + + /** + * Development ServiceClient URLs. + */ + DEVSTORE_DEFAULT_PROTOCOL: 'http://', + DEVSTORE_BLOB_HOST: '127.0.0.1:10000', + DEVSTORE_QUEUE_HOST: '127.0.0.1:10001', + DEVSTORE_TABLE_HOST: '127.0.0.1:10002', + + /** + * Production ServiceClient URLs. + */ + CLOUD_BLOB_HOST: 'blob.' + storageDnsSuffix, + CLOUD_QUEUE_HOST: 'queue.' + storageDnsSuffix, + CLOUD_TABLE_HOST: 'table.' + storageDnsSuffix, + CLOUD_FILE_HOST: 'file.' + storageDnsSuffix + }, + + HttpConstants: { + /** + * Http Verbs + * + * @const + * @enum {string} + */ + HttpVerbs: { + PUT: 'PUT', + GET: 'GET', + DELETE: 'DELETE', + POST: 'POST', + MERGE: 'MERGE', + HEAD: 'HEAD' + }, + + /** + * Response codes. 
+ * + * @const + * @enum {int} + */ + HttpResponseCodes: { + Ok: 200, + Created: 201, + Accepted: 202, + NoContent: 204, + PartialContent: 206, + BadRequest: 400, + Unauthorized: 401, + Forbidden: 403, + NotFound: 404, + Conflict: 409, + LengthRequired: 411, + PreconditionFailed: 412 + } + }, + + CompatibleVersionConstants: { + /** + * Constant for the 2013-08-15 version. + * + * @const + * @type {string} + */ + AUGUST_2013: '2013-08-15', + + /** + * Constant for the 2012-02-12 version. + * + * @const + * @type {string} + */ + FEBRUARY_2012: '2012-02-12' + }, + + BlobErrorCodeStrings: { + INVALID_BLOCK_ID: 'InvalidBlockId', + BLOB_NOT_FOUND: 'BlobNotFound', + BLOB_ALREADY_EXISTS: 'BlobAlreadyExists', + CONTAINER_ALREADY_EXISTS: 'ContainerAlreadyExists', + CONTAINER_NOT_FOUND: 'ContainerNotFound', + INVALID_BLOB_OR_BLOCK: 'InvalidBlobOrBlock', + INVALID_BLOCK_LIST: 'InvalidBlockList' + }, + + FileErrorCodeStrings: { + SHARE_ALREADY_EXISTS: 'ShareAlreadyExists', + SHARE_NOT_FOUND: 'ShareNotFound', + FILE_NOT_FOUND: 'FileNotFound' + }, + + QueueErrorCodeStrings: { + QUEUE_NOT_FOUND: 'QueueNotFound', + QUEUE_DISABLED: 'QueueDisabled', + QUEUE_ALREADY_EXISTS: 'QueueAlreadyExists', + QUEUE_NOT_EMPTY: 'QueueNotEmpty', + QUEUE_BEING_DELETED: 'QueueBeingDeleted', + POP_RECEIPT_MISMATCH: 'PopReceiptMismatch', + INVALID_PARAMETER: 'InvalidParameter', + MESSAGE_NOT_FOUND: 'MessageNotFound', + MESSAGE_TOO_LARGE: 'MessageTooLarge', + INVALID_MARKER: 'InvalidMarker' + }, + + /** + * Constants for storage error strings + * + * More details are at: http://msdn.microsoft.com/en-us/library/azure/dd179357.aspx + */ + StorageErrorCodeStrings: { + // Not Modified (304): The condition specified in the conditional header(s) was not met for a read operation. + // Precondition Failed (412): The condition specified in the conditional header(s) was not met for a write operation. + CONDITION_NOT_MET: 'ConditionNotMet', + // Bad Request (400): A required HTTP header was not specified. 
+ MISSING_REQUIRED_HEADER: 'MissingRequiredHeader', + // Bad Request (400): A required XML node was not specified in the request body. + MISSING_REQUIRED_XML_NODE: 'MissingRequiredXmlNode', + // Bad Request (400): One of the HTTP headers specified in the request is not supported. + UNSUPPORTED_HEADER: 'UnsupportedHeader', + // Bad Request (400): One of the XML nodes specified in the request body is not supported. + UNSUPPORTED_XML_NODE: 'UnsupportedXmlNode', + // Bad Request (400): The value provided for one of the HTTP headers was not in the correct format. + INVALID_HEADER_VALUE: 'InvalidHeaderValue', + // Bad Request (400): The value provided for one of the XML nodes in the request body was not in the correct format. + INVALID_XML_NODE_VALUE: 'InvalidXmlNodeValue', + // Bad Request (400): A required query parameter was not specified for this request. + MISSING_REQUIRED_QUERY_PARAMETER: 'MissingRequiredQueryParameter', + // Bad Request (400): One of the query parameters specified in the request URI is not supported. + UNSUPPORTED_QUERY_PARAMETER: 'UnsupportedQueryParameter', + // Bad Request (400): An invalid value was specified for one of the query parameters in the request URI. + INVALID_QUERY_PARAMETER_VALUE: 'InvalidQueryParameterValue', + // Bad Request (400): A query parameter specified in the request URI is outside the permissible range. + OUT_OF_RANGE_QUERY_PARAMETER_VALUE: 'OutOfRangeQueryParameterValue', + // Bad Request (400): The url in the request could not be parsed. + REQUEST_URL_FAILED_TO_PARSE: 'RequestUrlFailedToParse', + // Bad Request (400): The requested URI does not represent any resource on the server. + INVALID_URI: 'InvalidUri', + // Bad Request (400): The HTTP verb specified was not recognized by the server. + INVALID_HTTP_VERB: 'InvalidHttpVerb', + // Bad Request (400): The key for one of the metadata key-value pairs is empty. + EMPTY_METADATA_KEY: 'EmptyMetadataKey', + // Bad Request (400): The specified XML is not syntactically valid. 
+ INVALID_XML_DOCUMENT: 'InvalidXmlDocument', + // Bad Request (400): The MD5 value specified in the request did not match the MD5 value calculated by the server. + MD5_MISMATCH: 'Md5Mismatch', + // Bad Request (400): The MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded. + INVALID_MD5: 'InvalidMd5', + // Bad Request (400): One of the request inputs is out of range. + OUT_OF_RANGE_INPUT: 'OutOfRangeInput', + // Bad Request (400): The authentication information was not provided in the correct format. Verify the value of Authorization header. + INVALID_AUTHENTICATION_INFO: 'InvalidAuthenticationInfo', + // Bad Request (400): One of the request inputs is not valid. + INVALID_INPUT: 'InvalidInput', + // Bad Request (400): The specified metadata is invalid. It includes characters that are not permitted. + INVALID_METADATA: 'InvalidMetadata', + // Bad Request (400): The specifed resource name contains invalid characters. + INVALID_RESOURCE_NAME: 'InvalidResourceName', + // Bad Request (400): The size of the specified metadata exceeds the maximum size permitted. + METADATA_TOO_LARGE: 'MetadataTooLarge', + // Bad Request (400): Condition headers are not supported. + CONDITION_HEADER_NOT_SUPPORTED: 'ConditionHeadersNotSupported', + // Bad Request (400): Multiple condition headers are not supported. + MULTIPLE_CONDITION_HEADER_NOT_SUPPORTED: 'MultipleConditionHeadersNotSupported', + // Forbidden (403): Server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature. + AUTHENTICATION_FAILED: 'AuthenticationFailed', + // Forbidden (403): Read-access geo-redundant replication is not enabled for the account. + // Forbidden (403): Write operations to the secondary location are not allowed. + // Forbidden (403): The account being accessed does not have sufficient permissions to execute this operation. 
+ INSUFFICIENT_ACCOUNT_PERMISSIONS: 'InsufficientAccountPermissions', + // Not Found (404): The specified resource does not exist. + RESOURCE_NOT_FOUND: 'ResourceNotFound', + // Forbidden (403): The specified account is disabled. + ACCOUNT_IS_DISABLED: 'AccountIsDisabled', + // Method Not Allowed (405): The resource doesn't support the specified HTTP verb. + UNSUPPORTED_HTTP_VERB: 'UnsupportedHttpVerb', + // Conflict (409): The specified account already exists. + ACCOUNT_ALREADY_EXISTS: 'AccountAlreadyExists', + // Conflict (409): The specified account is in the process of being created. + ACCOUNT_BEING_CREATED: 'AccountBeingCreated', + // Conflict (409): The specified resource already exists. + RESOURCE_ALREADY_EXISTS: 'ResourceAlreadyExists', + // Conflict (409): The specified resource type does not match the type of the existing resource. + RESOURCE_TYPE_MISMATCH: 'ResourceTypeMismatch', + // Length Required (411): The Content-Length header was not specified. + MISSING_CONTENT_LENGTH_HEADER: 'MissingContentLengthHeader', + // Request Entity Too Large (413): The size of the request body exceeds the maximum size permitted. + REQUEST_BODY_TOO_LARGE: 'RequestBodyTooLarge', + // Requested Range Not Satisfiable (416): The range specified is invalid for the current size of the resource. + INVALID_RANGE: 'InvalidRange', + // Internal Server Error (500): The server encountered an internal error. Please retry the request. + INTERNAL_ERROR: 'InternalError', + // Internal Server Error (500): The operation could not be completed within the permitted time. + OPERATION_TIMED_OUT: 'OperationTimedOut', + // Service Unavailable (503): The server is currently unable to receive requests. Please retry your request. 
+ SERVER_BUSY: 'ServerBusy', + + // Legacy error code strings + UPDATE_CONDITION_NOT_SATISFIED: 'UpdateConditionNotSatisfied', + CONTAINER_NOT_FOUND: 'ContainerNotFound', + CONTAINER_ALREADY_EXISTS: 'ContainerAlreadyExists', + CONTAINER_DISABLED: 'ContainerDisabled', + CONTAINER_BEING_DELETED: 'ContainerBeingDeleted' + }, + + TableErrorCodeStrings: { + XMETHOD_NOT_USING_POST: 'XMethodNotUsingPost', + XMETHOD_INCORRECT_VALUE: 'XMethodIncorrectValue', + XMETHOD_INCORRECT_COUNT: 'XMethodIncorrectCount', + TABLE_HAS_NO_PROPERTIES: 'TableHasNoProperties', + DUPLICATE_PROPERTIES_SPECIFIED: 'DuplicatePropertiesSpecified', + TABLE_HAS_NO_SUCH_PROPERTY: 'TableHasNoSuchProperty', + DUPLICATE_KEY_PROPERTY_SPECIFIED: 'DuplicateKeyPropertySpecified', + TABLE_ALREADY_EXISTS: 'TableAlreadyExists', + TABLE_NOT_FOUND: 'TableNotFound', + ENTITY_NOT_FOUND: 'EntityNotFound', + ENTITY_ALREADY_EXISTS: 'EntityAlreadyExists', + PARTITION_KEY_NOT_SPECIFIED: 'PartitionKeyNotSpecified', + OPERATOR_INVALID: 'OperatorInvalid', + UPDATE_CONDITION_NOT_SATISFIED: 'UpdateConditionNotSatisfied', + PROPERTIES_NEED_VALUE: 'PropertiesNeedValue', + PARTITION_KEY_PROPERTY_CANNOT_BE_UPDATED: 'PartitionKeyPropertyCannotBeUpdated', + TOO_MANY_PROPERTIES: 'TooManyProperties', + ENTITY_TOO_LARGE: 'EntityTooLarge', + PROPERTY_VALUE_TOO_LARGE: 'PropertyValueTooLarge', + INVALID_VALUE_TYPE: 'InvalidValueType', + TABLE_BEING_DELETED: 'TableBeingDeleted', + TABLE_SERVER_OUT_OF_MEMORY: 'TableServerOutOfMemory', + PRIMARY_KEY_PROPERTY_IS_INVALID_TYPE: 'PrimaryKeyPropertyIsInvalidType', + PROPERTY_NAME_TOO_LONG: 'PropertyNameTooLong', + PROPERTY_NAME_INVALID: 'PropertyNameInvalid', + BATCH_OPERATION_NOT_SUPPORTED: 'BatchOperationNotSupported', + JSON_FORMAT_NOT_SUPPORTED: 'JsonFormatNotSupported', + METHOD_NOT_ALLOWED: 'MethodNotAllowed', + NOT_IMPLEMENTED: 'NotImplemented' + }, + + ConnectionStringKeys: { + USE_DEVELOPMENT_STORAGE_NAME: 'UseDevelopmentStorage', + DEVELOPMENT_STORAGE_PROXY_URI_NAME: 
'DevelopmentStorageProxyUri', + DEFAULT_ENDPOINTS_PROTOCOL_NAME: 'DefaultEndpointsProtocol', + ACCOUNT_NAME_NAME: 'AccountName', + ACCOUNT_KEY_NAME: 'AccountKey', + BLOB_ENDPOINT_NAME: 'BlobEndpoint', + FILE_ENDPOINT_NAME: 'FileEndpoint', + QUEUE_ENDPOINT_NAME: 'QueueEndpoint', + TABLE_ENDPOINT_NAME: 'TableEndpoint', + SHARED_ACCESS_SIGNATURE_NAME: 'SharedAccessSignature', + ENDPOINT_SUFFIX_NAME: 'EndpointSuffix', + BLOB_BASE_DNS_NAME: 'blob.core.windows.net', + FILE_BASE_DNS_NAME: 'file.core.windows.net', + QUEUE_BASE_DNS_NAME: 'queue.core.windows.net', + TABLE_BASE_DNS_NAME: 'table.core.windows.net' + } +}; + +module.exports = Constants; + + +/***/ }), + +/***/ 24018: +/***/ ((__unused_webpack_module, exports) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/** +* Date/time related helper functions +* @module date +* +*/ + +/** +* Generates a Date object which is in the given days from now. +* +* @param {int} days The days timespan. +* @return {Date} +*/ +exports.daysFromNow = function (days) { + var date = new Date(); + date.setDate(date.getDate() + days); + return date; +}; + +/** +* Generates a Date object which is in the given hours from now. +* +* @param {int} hours The hours timespan. 
+* @return {Date} +*/ +exports.hoursFromNow = function (hours) { + var date = new Date(); + date.setHours(date.getHours() + hours); + return date; +}; + +/** +* Generates a Date object which is in the given minutes from now. +* +* @param {int} minutes The minutes timespan. +* @return {Date} +*/ +exports.minutesFromNow = function (minutes) { + var date = new Date(); + date.setMinutes(date.getMinutes() + minutes); + return date; +}; + +/** +* Generates a Date object which is in the given seconds from now. +* +* @param {int} seconds The seconds timespan. +* @return {Date} +*/ +exports.secondsFromNow = function (seconds) { + var date = new Date(); + date.setSeconds(date.getSeconds() + seconds); + return date; +}; + +/***/ }), + +/***/ 88439: +/***/ ((__unused_webpack_module, exports) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var rightPad = function (n, number) { + var currentN = '' + n; + while (currentN.length < number) { + currentN = currentN + '0'; + } + + return currentN; +}; + +/** +* Formats a date into an iso 8061 string. +* +* @param {date} date The date to format. +* @param {bool} skipMilliseconds Boolean value indicating if the miliseconds part of the date should not be included. +* @param {integer} millisecondsPading Number of digits to left pad the miliseconds. +* @return {string} The date formated in the ISO 8061 date format. 
+*/ +exports.format = function (date) { + var dateString = date.toISOString(); + return dateString.substring(0, dateString.length - 1) + '0000Z'; +}; + +/** +* Parses an ISO 8061 date string into a date object. +* +* @param {string} stringDateTime The string with the date to parse in the ISO 8061 format. +* @return {date} The parsed date. +*/ +exports.parse = function (stringDateTime) { + var parts = stringDateTime.split('T'); + var ymd = parts[0].split('-'); + var time = parts[1].split('.'); + var hms = time[0].split(':'); + var ms = 0; + if (time[1]) { + ms = time[1].split('Z'); + } + + var date = new Date(Date.UTC( + parseInt(ymd[0], 10), + parseInt(ymd[1], 10) - 1, + parseInt(ymd[2], 10), + parseInt(hms[0], 10), + parseInt(hms[1], 10), + parseInt(hms[2], 10), + Math.round(parseInt(rightPad(ms[0], 7), 10) / 10000) + )); + + return date; +}; + +/***/ }), + +/***/ 53035: +/***/ ((__unused_webpack_module, __unused_webpack_exports, __nccwpck_require__) => { + +"use strict"; +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + + + +var XMLStringifier = __nccwpck_require__(52782); + +// Patch xmlbuilder to allow Unicode surrogate pair code +// points in XML bodies + +XMLStringifier.prototype.assertLegalChar = function(str) { + var chars, chr; + chars = /[\u0000-\u0008\u000B-\u000C\u000E-\u001F\uFFFE-\uFFFF]/; + chr = str.match(chars); + if (chr) { + throw new Error('Invalid character (' + chr + ') in string: ' + str); + } + return str; +}; + +/***/ }), + +/***/ 13497: +/***/ ((module, exports) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +exports = module.exports; + +var SR = { + ANONYMOUS_ACCESS_BLOBSERVICE_ONLY: 'Anonymous access is only valid for the BlobService.', + ARGUMENT_NULL_OR_EMPTY: 'The argument must not be null or an empty string. Argument name: %s.', + ARGUMENT_NULL_OR_UNDEFINED: 'The argument must not be null or undefined. Argument name: %s.', + ARGUMENT_OUT_OF_RANGE_ERROR: 'The argument is out of range. 
Argument name: %s, Value passed: %s.', + BATCH_ONE_PARTITION_KEY: 'All entities in the batch must have the same PartitionKey value.', + BATCH_ONE_RETRIEVE: 'If a retrieve operation is part of a batch, it must be the only operation in the batch.', + BATCH_TOO_LARGE: 'Batches must not contain more than 100 operations.', + BLOB_INVALID_SEQUENCE_NUMBER: 'The sequence number may not be specified for an increment operation.', + BLOB_TYPE_MISMATCH: 'Blob type of the blob reference doesn\'t match blob type of the blob.', + CANNOT_CREATE_SAS_WITHOUT_ACCOUNT_KEY: 'Cannot create Shared Access Signature unless the Account Name and Key are used to create the ServiceClient.', + CONTENT_LENGTH_MISMATCH: 'An incorrect number of bytes was read from the connection. The connection may have been closed.', + CONTENT_TYPE_MISSING: 'Content-Type response header is missing or invalid.', + EMPTY_BATCH: 'Batch must not be empty.', + EXCEEDED_SIZE_LIMITATION: 'Upload exceeds the size limitation. Max size is %s but the current size is %s', + HASH_MISMATCH: 'Hash mismatch (integrity check failed), Expected value is %s, retrieved %s.', + INCORRECT_ENTITY_KEYS: 'PartitionKey and RowKey must be specified as strings in the entity object.', + INVALID_BLOB_LENGTH: 'createBlockBlobFromText requires the size of text to be less than 64MB. 
Please use createBlockBlobFromLocalFile or createBlockBlobFromStream to upload large blobs.', + INVALID_CONNECTION_STRING: 'Connection strings must be of the form "key1=value1;key2=value2".', + INVALID_CONNECTION_STRING_BAD_KEY: 'Connection string contains unrecognized key: "%s"', + INVALID_CONNECTION_STRING_DUPLICATE_KEY: 'Connection string contains duplicate key: "%s"', + INVALID_CONNECTION_STRING_EMPTY_KEY: 'Connection strings must not contain empty keys.', + INVALID_DELETE_SNAPSHOT_OPTION: 'The deleteSnapshots option cannot be included when deleting a specific snapshot using the snapshotId option.', + INVALID_EDM_TYPE: 'The value \'%s\' does not match the type \'%s\'.', + INVALID_FILE_LENGTH: 'createFileFromText requires the size of text to be less than 4MB. Please use createFileFromLocalFile or createFileFromStream to upload large files.', + INVALID_FILE_RANGE_FOR_UPDATE: 'Range size should be less than 4MB for a file range update operation.', + INVALID_HEADERS: 'Headers are not supported in the 2012-02-12 version.', + INVALID_MESSAGE_ID: 'Message ID cannot be null or undefined for deleteMessage and updateMessage operations.', + INVALID_PAGE_BLOB_LENGTH: 'Page blob length must be multiple of 512.', + INVALID_PAGE_END_OFFSET: 'Page end offset must be multiple of 512.', + INVALID_PAGE_RANGE_FOR_UPDATE: 'Page range size should be less than 4MB for a page update operation.', + INVALID_PAGE_START_OFFSET: 'Page start offset must be multiple of 512.', + INVALID_POP_RECEIPT: 'Pop Receipt cannot be null or undefined for deleteMessage and updateMessage operations.', + INVALID_PROPERTY_RESOLVER: 'The specified property resolver returned an invalid type. %s:{_:%s,$:%s }', + INVALID_RANGE_FOR_MD5: 'The requested range should be less than 4MB when contentMD5 is expected from the server', + INVALID_SAS_VERSION: 'SAS Version ? is invalid. 
Valid versions include: ?.', + INVALID_SAS_TOKEN: 'The SAS token should not contain api-version.', + INVALID_SIGNED_IDENTIFIERS: 'Signed identifiers need to be a hash object with key as the id and the value as the access policy.', + INVALID_STREAM_LENGTH: 'The length of the provided stream is invalid.', + INVALID_STRING_ERROR: 'Invalid string error.', + INVALID_TABLE_OPERATION: 'Operation not found: %s', + INVALID_TEXT_LENGTH: 'The length of the provided text is invalid.', + MAXIMUM_EXECUTION_TIMEOUT_EXCEPTION: 'The client could not finish the operation within specified maximum execution timeout.', + MD5_NOT_POSSIBLE: 'MD5 cannot be calculated for an existing blob because it would require reading the existing data. Please disable storeBlobContentMD5.', + MD5_NOT_PRESENT_ERROR: 'MD5 does not exist. If you do not want to force validation, please disable useTransactionalMD5.', + METADATA_KEY_INVALID: 'The key for one of the metadata key-value pairs is null, empty, or whitespace.', + METADATA_VALUE_INVALID: 'The value for one of the metadata key-value pairs is null, empty, or whitespace.', + NO_CREDENTIALS_PROVIDED: 'Credentials must be provided when creating a service client.', + PRIMARY_ONLY_COMMAND: 'This operation can only be executed against the primary storage location.', + QUERY_OPERATOR_REQUIRES_WHERE: '%s operator needs to be used after where.', + SECONDARY_ONLY_COMMAND: 'This operation can only be executed against the secondary storage location.', + STORAGE_HOST_LOCATION_REQUIRED: 'The host for the storage service must be specified.', + STORAGE_HOST_MISSING_LOCATION: 'The host for the target storage location is not specified. 
Please consider changing the request\'s location mode.', + TYPE_NOT_SUPPORTED: 'Type not supported when sending data to the service: ', + MAX_BLOB_SIZE_CONDITION_NOT_MEET: 'Append block data should not exceed the maximum blob size condition value.', +}; + +module.exports = SR; + +/***/ }), + +/***/ 68003: +/***/ ((module) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Expose 'StorageUtilities'. + +/** +* Defines constants, enums, and utility functions for use with storage. +* @namespace +*/ +var StorageUtilities = { + /** + * Specifies the location mode used to decide which location the request should be sent to. + * + * @const + * @enum {number} + */ + LocationMode: { + /** The primary location only */ + PRIMARY_ONLY: 0, + /** The primary location first, then the secondary */ + PRIMARY_THEN_SECONDARY: 1, + /** The secondary location only */ + SECONDARY_ONLY: 2, + /** The secondary location first, then the primary */ + SECONDARY_THEN_PRIMARY: 3 + } +}; + +module.exports = StorageUtilities; + +/***/ }), + +/***/ 3396: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var _ = __nccwpck_require__(7404); +var util = __nccwpck_require__(73837); +var url = __nccwpck_require__(57310); +var stream = __nccwpck_require__(12781); +var Constants = __nccwpck_require__(60658); +var Md5Wrapper = __nccwpck_require__(11007); +var StorageUtilities = __nccwpck_require__(68003); +var SR = __nccwpck_require__(13497); + +/** +* Trim the default port in the url. +* +* @param {string} uri The URI to be encoded. +* @return {string} The URI without defualt port. +*/ +exports.trimPortFromUri = function (uri) { + var uri = url.parse(uri); + if ((uri.protocol === Constants.HTTPS && uri.port == Constants.DEFAULT_HTTPS_PORT) || (uri.protocol === Constants.HTTP && uri.port == Constants.DEFAULT_HTTP_PORT)) { + uri.host = uri.hostname; + } + return url.format(uri); +}; + +/** +* Returns the number of keys (properties) in an object. +* +* @param {object} value The object which keys are to be counted. +* @return {number} The number of keys in the object. +*/ +exports.objectKeysLength = function (value) { + if (!value) { + return 0; + } + + return _.keys(value).length; +}; + +/** +* Checks if in a browser environment. +* +* @return {bool} True if in a browser environment, false otherwise. +*/ +exports.isBrowser = function () { + return typeof window !== 'undefined'; +}; + +/** +* Checks if in IE. +* +* @return {bool} True if in IE, false otherwise. 
+*/ +exports.isIE = function () { + if (!exports.isBrowser()) { + return false; + } + + var ua = window.navigator.userAgent; + var msie = ua.indexOf('MSIE '); + var trident = ua.indexOf('Trident/'); + return msie > 0 || trident > 0; +}; + +/** +* Checks if in a 32bit Node.js environment. +* +* @return {bool} True if in a 32bit Node.js environment, false otherwise. +*/ +exports.is32 = function () { + return !exports.isBrowser() && process.arch === 'ia32'; +}; + +/** +* Checks if a value is null or undefined. +* +* @param {object} value The value to check for null or undefined. +* @return {bool} True if the value is null or undefined, false otherwise. +*/ +exports.objectIsNull = function (value) { + return _.isNull(value) || _.isUndefined(value); +}; + +/** +* Checks if an object is empty. +* +* @param {object} object The object to check if it is null. +* @return {bool} True if the object is empty, false otherwise. +*/ +exports.objectIsEmpty = function (object) { + return _.isEmpty(object); +}; + +/** +* Determines if an object contains an integer number. +* +* @param {object} value The object to assert. +* @return {bool} True if the object contains an integer number; false otherwise. +*/ +exports.objectIsInt = function (value) { + return typeof value === 'number' && parseFloat(value) == parseInt(value, 10) && !isNaN(value); +}; + +/** +* Determines if an object is a NaN. +* +* @param {object} value The object to assert. +* @return {bool} True if the object is a NaN; false otherwise. +*/ +exports.objectIsNaN = function (value) { + return typeof(value) === 'number' && isNaN(value); +}; + +/** +* Checks if an object is a string. +* +* @param {object} object The object to check if it is a string. +* @return {bool} True if the object is a string, false otherwise. 
+*/ +exports.objectIsString = function (object) { + return _.isString(object); +}; + +/** +* Check if an object is a function +* @param {object} object The object to check whether it is function +* @return {bool} True if the specified object is function, otherwise false +*/ +exports.objectIsFunction = function (object) { + return _.isFunction(object); +}; + + +/** +* Front zero padding of string to sepcified length +*/ +exports.zeroPaddingString = function(str, len) { + var paddingStr = '0000000000' + str; + if(paddingStr.length < len) { + return exports.zeroPaddingString(paddingStr, len); + } else { + return paddingStr.substr(-1 * len); + } +}; + +/** +* Checks if a value is an empty string, null or undefined. +* +* @param {object} value The value to check for an empty string, null or undefined. +* @return {bool} True if the value is an empty string, null or undefined, false otherwise. +*/ +exports.stringIsEmpty = function (value) { + return _.isNull(value) || _.isUndefined(value) || value === ''; +}; + +/** +* Checks if a value is null, empty, undefined or consists only of white-space characters. +* +* @param {object} value The value to check for null, empty, undefined and white-space only characters. +* @return {bool} True if the value is an empty string, null, undefined, or consists only of white-space characters, false otherwise. +*/ +exports.IsNullOrEmptyOrUndefinedOrWhiteSpace = function (value) { + if(_.isNull(value) || _.isUndefined(value) || value === '') { + return true; + } + + if(_.isString(value) && value.trim().length === 0) { + return true; + } + + return false; +}; + +/** +* Formats a text replacing '?' by the arguments. +* +* @param {string} text The string where the ? should be replaced. +* @param {array} arguments Value(s) to insert in question mark (?) parameters. 
+* @return {string} +*/ +exports.stringFormat = function (text) { + if (arguments.length > 1) { + for (var i = 1; text.indexOf('?') !== -1; i++) { + text = text.replace('?', arguments[i]); + } + } + + return text; +}; + +/** +* Determines if a string starts with another. +* +* @param {string} text The string to assert. +* @param {string} prefix The string prefix. +* @return {Bool} True if the string starts with the prefix; false otherwise. +*/ +exports.stringStartsWith = function (text, prefix) { + if (_.isNull(prefix)) { + return true; + } + + return text.substr(0, prefix.length) === prefix; +}; + +/** +* Determines if a string ends with another. +* +* @param {string} text The string to assert. +* @param {string} suffix The string suffix. +* @return {Bool} True if the string ends with the suffix; false otherwise. +*/ +exports.stringEndsWith = function (text, suffix) { + if (_.isNull(suffix)) { + return true; + } + + return text.substr(text.length - suffix.length) === suffix; +}; + +/** +* Removes the BOM from a string. +* +* @param {string} str The string from where the BOM is to be removed +* @return {string} The string without the BOM. +*/ +exports.removeBOM = function (str) { + if (str.charCodeAt(0) === 0xfeff || str.charCodeAt(0) === 0xffef) { + str = str.substring(1); + } + + return str; +}; + +/** +* Merges multiple objects. +* +* @param {object} object The objects to be merged +* @return {object} The merged object. +*/ +exports.merge = function () { + return _.extend.apply(this, arguments); +}; + +/** +* Checks if a value exists in an array. The comparison is done in a case +* insensitive manner. +* +* @param {string} needle The searched value. +* @param {array} haystack The array. 
+* +* @static +* +* @return {boolean} +*/ +exports.inArrayInsensitive = function (needle, haystack) { + return _.contains(_.map(haystack, function (h) { return h.toLowerCase(); }), needle.toLowerCase()); +}; + +/** +* Returns the specified value of the key passed from object and in case that +* this key doesn't exist, the default value is returned. The key matching is +* done in a case insensitive manner. +* +* @param {string} key The array key. +* @param {object} haystack The object to be used. +* @param {mix} default The value to return if $key is not found in $array. +* +* @static +* +* @return mix +*/ +exports.tryGetValueInsensitive = function (key, haystack, defaultValue) { + if (haystack) { + for (var i in haystack) { + if (haystack.hasOwnProperty(i) && i.toString().toLowerCase() === key.toString().toLowerCase()) { + return haystack[i]; + } + } + } + + return defaultValue; +}; + +/** +* Returns the value in a chained object. +* +* @param {object} object The object with the values. +* @param {array} keys The keys. +* @param {mix} default The value to return if $key is not found in $array. +* +* @static +* +* @return mix +*/ +exports.tryGetValueChain = function (object, keys, defaultValue) { + if (keys.length === 0) { + return object; + } + + var currentKey = keys.shift(); + if (object && object[currentKey] !== undefined) { + return exports.tryGetValueChain(object[currentKey], keys, defaultValue); + } + + return defaultValue; +}; + +/** +* Set the value of an inner property of an object. +* +* @param {object} object The target object. +* @param {array} keys The property chain keys. +* @param {mix} object The value to be set. 
+* +* @static + +* @example +* // Set targetObject.propA.propB to 'testValue' +* var targetObject = {}; +* util.setObjectInnerPropertyValue(targetObject, ['propA', 'propB'], 'testValue'); +*/ +exports.setObjectInnerPropertyValue = function(object, propertyChainKeys, value){ + if(!object || propertyChainKeys.length < 1) { + return; + } + + var currentKey = propertyChainKeys.shift(); + if(propertyChainKeys.length === 0) { + object[currentKey] = value; + return; + } + + if (!object[currentKey]) { + object[currentKey] = {}; + } + + exports.setObjectInnerPropertyValue(object[currentKey], propertyChainKeys, value); +}; + +/** +* Rounds a date off to seconds. +* +* @param {Date} a date +* @return {string} the date in ISO8061 format, with no milliseconds component +*/ +exports.truncatedISO8061Date = function (date) { + var dateString = date.toISOString(); + return dateString.substring(0, dateString.length - 5) + 'Z'; +}; + +exports.normalizeArgs = function (optionsOrCallback, callback, result) { + var options = {}; + if(_.isFunction(optionsOrCallback) && !callback) { + callback = optionsOrCallback; + } else if (optionsOrCallback) { + options = optionsOrCallback; + } + + result(options, callback); +}; + +exports.getNodeVersion = function () { + var parsedVersion = process.version.split('.'); + return { + major: parseInt(parsedVersion[0].substr(1), 10), + minor: parseInt(parsedVersion[1], 10), + patch: parseInt(parsedVersion[2], 10) + }; +}; + +/** +* Calculate md5sum for the stream +* @ignore +*/ +exports.calculateMD5 = function(readStream, bufferLength, options, callback) { + var internalBuff = Buffer.alloc(bufferLength); + var index = 0; + var internalHash = new Md5Wrapper().createMd5Hash(); + readStream.on('data', function(data) { + if (index + data.length > bufferLength) { + var copyLength = bufferLength - index; + if (copyLength > 0) { + data = data.slice(0, copyLength); + data.copy(internalBuff, index); + internalHash.update(data); + index += copyLength; + } + 
readStream.emit('end'); + } else { + data.copy(internalBuff, index); + internalHash.update(data); + index += data.length; + } + }).on('end', function() { + if (!readStream.endEmitted) { + internalBuff = internalBuff.slice(0, index); + var contentMD5 = internalHash.digest('base64'); + // Set the flag to be compatible with Nodejs 0.10 which will keep emitting data from + // the file stream when the read stream has emitted the end event from its listner. + readStream.endEmitted = true; + callback(internalBuff, contentMD5); + } + }); +}; + +/** +* Whether the content of buffer is all zero +*/ +exports.isBufferAllZero = function (buffer) { + for(var i = 0, len = buffer.length; i < len; i++) { + if (buffer[i] !== 0) { + return false; + } + } + return true; +}; + +/** +* Write zero to stream +*/ +var zeroBuffer = null; +exports.writeZerosToStream = function (stream, length, md5Hash, progressCallback, callback) { + var defaultBufferSize = Constants.BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES; + var bufferSize = Math.min(defaultBufferSize, length); + var remaining = length - bufferSize; + var buffer = null; + if (bufferSize == defaultBufferSize) { + if (!zeroBuffer) { + zeroBuffer = Buffer.alloc(defaultBufferSize); + zeroBuffer.fill(0); + } + buffer = zeroBuffer; + } else { + buffer = Buffer.alloc(bufferSize); + buffer.fill(0); + } + if (md5Hash) { + md5Hash.update(buffer); + } + //We can only write the entire buffer to stream instead of part of buffer. 
+ return stream.write(buffer, function () { + if (exports.objectIsFunction(progressCallback)) { + progressCallback(null, buffer.length); + } + buffer = null; + if (remaining > 0) { + exports.writeZerosToStream(stream, remaining, md5Hash, progressCallback, callback); + } else if (exports.objectIsFunction(callback)) { + callback(null, null); + } + }); +}; + +/** +* Calculate md5sum for the content +*/ +exports.getContentMd5 = function (content, encoding) { + if (!encoding) encoding = 'base64'; + var internalHash = new Md5Wrapper().createMd5Hash(); + internalHash.update(content, 'utf8'); + return internalHash.digest(encoding); +}; + +exports.getNextLocation = function(lastLocation, locationMode) { + switch(locationMode) { + case StorageUtilities.LocationMode.PRIMARY_ONLY: + return Constants.StorageLocation.PRIMARY; + case StorageUtilities.LocationMode.SECONDARY_ONLY: + return Constants.StorageLocation.SECONDARY; + case StorageUtilities.LocationMode.PRIMARY_THEN_SECONDARY: + case StorageUtilities.LocationMode.SECONDARY_THEN_PRIMARY: + return (lastLocation === Constants.StorageLocation.PRIMARY) ? 
Constants.StorageLocation.SECONDARY : Constants.StorageLocation.PRIMARY; + default: + throw new RangeError(util.format(SR.ARGUMENT_OUT_OF_RANGE_ERROR, 'locationMode', locationMode)); + } +}; + +exports.getNextListingLocationMode = function (token) { + if(_.isNull(token) || _.isUndefined(token)) { + return Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + } + else { + switch (token.targetLocation) { + case Constants.StorageLocation.PRIMARY: + return Constants.RequestLocationMode.PRIMARY_ONLY; + case Constants.StorageLocation.SECONDARY: + return Constants.RequestLocationMode.SECONDARY_ONLY; + default: + throw new RangeError(util.format(SR.ARGUMENT_OUT_OF_RANGE_ERROR, 'targetLocation', token.targetLocation)); + } + } +}; + +exports.isStreamPaused = function (object) { + if (object instanceof stream) { + return object._paused === true || (object._readableState && object._readableState.flowing === false); + } + return false; +}; + +/** +* Parse copy progress string in the format of bytesCopied/totalBytes +*/ +exports.parseCopyProgress = function (progress) { + if (typeof progress != 'string' || progress.indexOf('/') === -1) { + return {}; + } + + var progressInfo = progress.split('/'); + return { bytesCopied: progressInfo[0], totalBytes: progressInfo[1] }; +}; + +/** +* The list of the properties should be normalized with explicit mapping +*/ +var normalizePropertyNameExceptionList = { + 'x-ms-blob-sequence-number': 'sequenceNumber', + 'content-Type': 'contentSettings.contentType', + 'content-Encoding': 'contentSettings.contentEncoding', + 'content-Language': 'contentSettings.contentLanguage', + 'cache-Control': 'contentSettings.cacheControl', + 'content-Disposition': 'contentSettings.contentDisposition', + 'content-MD5': 'contentSettings.contentMD5', + 'leaseId': 'lease.id', + 'leaseStatus': 'lease.status', + 'leaseDuration': 'lease.duration', + 'leaseState': 'lease.state', + 'copyId': 'copy.id', + 'copyStatus': 'copy.status', + 'copySource': 'copy.source', + 
'copyProgress': 'copy.progress', + 'copyCompletionTime': 'copy.completionTime', + 'copyStatusDescription': 'copy.statusDescription', + 'copyDestinationSnapshot': 'copy.destinationSnapshot', + 'publicAccess': 'publicAccessLevel', + 'incrementalCopy': 'isIncrementalCopy' +}; + +/** +* Normalize the property name from XML to keep consistent with +* the name defined in the property headers +*/ +exports.normalizePropertyNameFromXML = function (propertyName) { + if (this.IsNullOrEmptyOrUndefinedOrWhiteSpace(propertyName)) { + return ''; + } + + propertyName = propertyName.trim(); + propertyName = propertyName[0].toLowerCase() + propertyName.substring(1); + // So far the cases are: + // for the 'last-modified' property in listing resources + // for the 'content-*' properties in listing resources + // for the 'cache-control' property in listing blobs + // for the 'x-ms-blob-sequence-number' in listing blobs + if (propertyName in normalizePropertyNameExceptionList) { + return normalizePropertyNameExceptionList[propertyName]; + } else if (propertyName.toLowerCase().indexOf('-') != -1) { + return propertyName.replace('-', ''); + } else { + return propertyName; + } +}; + +/** +* Set the property value from XML +*/ +exports.setPropertyValueFromXML = function (result, xmlNode, toNormalize) { + for (var subPropertyName in xmlNode) { + if (xmlNode.hasOwnProperty(subPropertyName)) { + if (toNormalize) { + var propertyChain = this.normalizePropertyNameFromXML(subPropertyName).split('.'); + exports.setObjectInnerPropertyValue(result, propertyChain, xmlNode[subPropertyName]); + } else { + result[subPropertyName.toLowerCase()] = xmlNode[subPropertyName]; + } + + if (subPropertyName.toLowerCase() === 'copyprogress') { + var info = this.parseCopyProgress(xmlNode[subPropertyName]); + exports.setObjectInnerPropertyValue(result, ['copy', 'bytesCopied'], parseInt(info.bytesCopied)); + exports.setObjectInnerPropertyValue(result, ['copy', 'totalBytes'], parseInt(info.totalBytes)); + } + } + } 
+}; + +/** + * Filter out non-reserved properties from options + */ +exports.filterOutNonReservedProperties = function (reserved, options) { + var nonReservedProperties = {}; + if (options) { + for (var prop in options) { + if (options.hasOwnProperty(prop)) { + var isReserved = reserved.hasOwnProperty(prop); + var isFunction = typeof options[prop] === 'function'; + if (!isReserved && !isFunction) { + nonReservedProperties[prop] = options[prop]; + } + } + } + } + return nonReservedProperties; +}; + +/***/ }), + +/***/ 51046: +/***/ ((module, exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +var _ = __nccwpck_require__(7404); +var util = __nccwpck_require__(73837); + +var constants = __nccwpck_require__(60658); +var blobConstants = constants.BlobConstants; +var BlobUtilities = __nccwpck_require__(89959); +var FileUtilities = __nccwpck_require__(83656); +var azureutil = __nccwpck_require__(3396); +var SR = __nccwpck_require__(13497); +var check = __nccwpck_require__(49420); +var errors = __nccwpck_require__(12528); +var ArgumentError = errors.ArgumentError; +var ArgumentNullError = errors.ArgumentNullError; + +exports = module.exports; + +function initCallback(callbackParam, resultsCb) { + var fail; + if (callbackParam) { + fail = function (err) { + callbackParam(err); + return false; + }; + } else { + fail = function (err) { + throw err; + }; + callbackParam = function () {}; + } + + resultsCb(fail, callbackParam); +} + +/** +* Checks if the given value is a valid enumeration or not. +* +* @param {object} value The value to validate. +* @param {object} list The enumeration values. +* @return {boolean} +*/ +exports.isValidEnumValue = function (value, list, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (!list.some(function (current) { + return current.toLowerCase() === value.toLowerCase(); + })) { + return fail(new RangeError(util.format('Invalid value: %s. Options are: %s.', value, list))); + } + + callback(); + return true; +}; + +/** +* Creates a anonymous function that check if the given uri is valid or not. +* +* @param {string} uri The uri to validate. +* @return {boolean} +*/ +exports.isValidUri = function (uri) { + if (!check.isURL(uri, { 'require_tld': false })){ + throw new URIError('The provided URI "' + uri + '" is invalid.'); + } + return true; +}; + +/** +* Checks if the given host is valid or not. +* +* @param {string|object} host The host to validate. 
+* @return {boolean} +*/ +exports.isValidHost= function (host) { + if (azureutil.objectIsNull(host)) { + throw new ArgumentNullError('host', SR.STORAGE_HOST_LOCATION_REQUIRED); + } else { + var storageHost = {}; + storageHost.primaryHost = _.isString(host) ? host : host.primaryHost; + if (storageHost.primaryHost && !check.isURL(storageHost.primaryHost, { 'require_tld': false })){ + throw new URIError('The provided URI "' + storageHost.primaryHost + '" is invalid.'); + } + + storageHost.secondaryHost = _.isString(host) ? undefined : host.secondaryHost; + if (storageHost.secondaryHost && !check.isURL(storageHost.secondaryHost, { 'require_tld': false })){ + throw new URIError('The provided URI "' + storageHost.secondaryHost + '" is invalid.'); + } + + if (!storageHost.primaryHost && !storageHost.secondaryHost) { + throw new ArgumentNullError('host', SR.STORAGE_HOST_LOCATION_REQUIRED); + } + } + + return true; +}; + +/** +* Checks if the given value is a valid UUID or not. +* +* @param {string|object} uuid The uuid to validate. +* @return {boolean} +*/ +exports.isValidUuid = function(uuid, callback) { + var validUuidRegex = /^[a-zA-Z0-9]{8}\-[a-zA-Z0-9]{4}\-[a-zA-Z0-9]{4}\-[a-zA-Z0-9]{4}\-[a-zA-Z0-9]{12}$/; + + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (!validUuidRegex.test(uuid)) { + return fail(new SyntaxError('The value is not a valid UUID format.')); + } + + callback(); + return true; +}; + +/** +* Creates a anonymous function that check if a given key is base 64 encoded. +* +* @param {string} key The key to validate. +* @return {function} +*/ +exports.isBase64Encoded = function (key) { + var isValidBase64String = key.match(/^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{4}|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)$/); + + if (isValidBase64String) { + return true; + } else { + throw new SyntaxError('The provided account key ' + key + ' is not a valid base64 string.'); + } +}; + +/** +* Validates a function. 
+* +* @param {object} function The function to validate. +* @return {function} +*/ +exports.isValidFunction = function (functionObject, functionName) { + if (!functionObject) { + throw new ArgumentNullError('functionObject', functionName + ' must be specified.'); + } + if(!_.isFunction(functionObject)){ + throw new TypeError(functionName + ' specified should be a function.'); + } + return true; +}; + +var getNameError = function(name, typeName) { + // checks if name is null, undefined or empty + if (azureutil.stringIsEmpty(name)) { + return new ArgumentNullError('name', util.format('%s name must be a non empty string.', typeName)); + } + + // check if name is between 3 and 63 characters + if (name.length < 3 || name.length > 63) { + return new ArgumentError('name', util.format('%s name must be between 3 and 63 characters long.', typeName)); + } + + // check if name follows naming rules + if (name.match(/^([a-z0-9]+(-[a-z0-9]+)*)$/) === null) { + return new SyntaxError(util.format('%s name format is incorrect.', typeName)); + } + + return null; +}; + +/** +* Validates a container name. +* +* @param {string} containerName The container name. +*/ +exports.containerNameIsValid = function (containerName, callback) { + var fail; + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + var nameError = getNameError(containerName, 'Container'); + + if (!nameError || containerName.match(/^(\$root|\$logs|\$web)$/)) { + callback(); + return true; + } else { + return fail(nameError); + } +}; + +/** +* Validates a blob name. +* +* @param {string} containerName The container name. +* @param {string} blobname The blob name. 
+*/ +exports.blobNameIsValid = function (containerName, blobName, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (!blobName) { + return fail(new ArgumentNullError('blobName', 'Blob name is not specified.')); + } + + if (containerName === '$root' && blobName.indexOf('/') !== -1) { + return fail(new SyntaxError('Blob name format is incorrect.')); + } + + callback(); + return true; +}; + +/** +* Validates a blob tier name. +* +* @param {string} blobTier The blob tier name. +*/ +exports.blobTierNameIsValid = function (blobTier, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (!blobTier) { + return fail(new ArgumentNullError('blobTier', 'Blob tier is not specified.')); + } + + if (!_.chain(_.union( + _.values(BlobUtilities.BlobTier.PremiumPageBlobTier), + _.values(BlobUtilities.BlobTier.StandardBlobTier) + )) + .map(function (val) { return val.toString().toUpperCase(); }) + .contains(blobTier.toString().toUpperCase()) + .value()) { + return fail(new SyntaxError('Blob tier is incorrect. Refer to BlobUtilities.BlobTier for possible values.')); + } + + callback(); + return true; +}; + +/** +* Validates a share name. +* +* @param {string} shareName The share name. +*/ +exports.shareNameIsValid = function (shareName, callback) { + var fail; + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + var nameError = getNameError(shareName, 'Share'); + + if (!nameError) { + callback(); + return true; + } else { + return fail(nameError); + } +}; + +/** +* Validates a queue name. +* +* @param {string} queueName The queue name. 
+*/ +exports.queueNameIsValid = function (queueName, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + var nameError = getNameError(queueName, 'Queue'); + + if (!nameError) { + callback(); + return true; + } else { + return fail(nameError); + } +}; + +/** +* Validates a table name. +* +* @param {string} table The table name. +*/ +exports.tableNameIsValid = function (table, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (azureutil.stringIsEmpty(table)) { + return fail(new ArgumentNullError('table', 'Table name must be a non empty string.')); + } + + if (table.length < 3 || table.length > 63) { + return fail(new ArgumentError('table', 'Table name must be between 3 and 63 characters long.')); + } + + if(table.toLowerCase() === 'tables') { + return fail(new RangeError('Table name cannot be \'Tables\'.')); + } + + if (table.match(/^([A-Za-z][A-Za-z0-9]{2,62})$/) !== null || table === '$MetricsCapacityBlob' || table.match(/^(\$Metrics(HourPrimary|MinutePrimary|HourSecondary|MinuteSecondary)?(Transactions)(Blob|Queue|Table|File))$/) !== null) + { + callback(); + return true; + } else { + return fail(new SyntaxError('Table name format is incorrect.')); + } +}; + +/** +* Validates an HTML File object. +* +* @param {File} browserFile The HTML File object. 
+*/ +exports.browserFileIsValid = function (browserFile, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + // IE doesn't support File.constructor.name + if (!azureutil.isBrowser() || + !browserFile || + !browserFile.constructor || + (!azureutil.isIE() && !browserFile.constructor.name) || + (!azureutil.isIE() && browserFile.constructor.name !== 'File' && browserFile.constructor.name !== 'Blob') || + !azureutil.objectIsInt(browserFile.size)) { + return fail(new ArgumentError('type', 'Invalid HTML File object.')); + } else { + callback(); + return true; + } +}; + +/** +* Validates page ranges. +* +* @param {int} rangeStart The range starting position. +* @param {int} rangeEnd The range ending position. +* @param {int} writeBlockSizeInBytes The block size. +*/ +exports.pageRangesAreValid = function (rangeStart, rangeEnd, writeBlockSizeInBytes, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (rangeStart % 512 !== 0) { + return fail(new RangeError('Start byte offset must be a multiple of 512.')); + } + + var size = null; + if (!azureutil.objectIsNull(rangeEnd)) { + if ((rangeEnd + 1) % 512 !== 0) { + return fail(new RangeError('End byte offset must be a multiple of 512 minus 1.')); + } + + size = (rangeEnd - rangeStart) + 1; + if (size > writeBlockSizeInBytes) { + return fail(new RangeError('Page blob size cannot be larger than ' + writeBlockSizeInBytes + ' bytes.')); + } + } + + callback(); + return true; +}; + +/** +* Validates a blob type. +* +* @param {string} type The type name. +*/ +exports.blobTypeIsValid = function (type, callback) { + var getEnumValues = function (obj) { + var values = []; + for (var prop in obj) { + if (obj.hasOwnProperty(prop)) { + values.push(obj[prop]); + } + } + return values; + }; + + return this.isValidEnumValue(type, getEnumValues(blobConstants.BlobTypes), callback); +}; + +/** +* Validates share ACL type. 
+* +* @param {string} type The type name. +*/ +exports.shareACLIsValid = function (type, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (type != FileUtilities.SharePublicAccessType.OFF) { + fail(new ArgumentError('type', 'The access type is not supported.')); + } + + callback(); + return true; +}; + +/** +* Validates share quota value. +* +* @param {int} type The quota value. +*/ +exports.shareQuotaIsValid = function (quota, callback) { + var fail; + + initCallback(callback, function (f, cb) { + fail = f; + callback = cb; + }); + + if (quota && quota <= 0) { + fail(new RangeError('The share quota value, in GB, must be greater than 0.')); + } + + callback(); + return true; +}; + +// common functions for validating arguments + +function throwMissingArgument(name, func) { + throw new ArgumentNullError(name, 'Required argument ' + name + ' for function ' + func + ' is not defined'); +} + +function ArgumentValidator(functionName) { + this.func = functionName; +} + +_.extend(ArgumentValidator.prototype, { + string: function (val, name) { + this.exists(val, name); + if (typeof val !== 'string') { + throw new TypeError('Parameter ' + name + ' for function ' + this.func + ' should be a non-empty string'); + } + }, + + stringAllowEmpty: function (val, name) { + if (typeof val !== 'string') { + throw new TypeError('Parameter ' + name + ' for function ' + this.func + ' should be a string'); + } + }, + + object: function (val, name) { + this.exists(val, name); + if (typeof val !== 'object') { + throw new TypeError('Parameter ' + name + ' for function ' + this.func + ' should be an object'); + } + }, + + exists: function (val, name) { + if (!val) { + throwMissingArgument(name, this.func); + } + }, + + function: function (val, name) { + this.exists(val, name); + if (typeof val !== 'function') { + throw new TypeError('Parameter ' + name + ' for function ' + this.func + ' should be a function'); + } + }, + + value: 
function (val, name) { + if (!val && val !== 0) { + throwMissingArgument(name, this.func); + } + }, + + nonEmptyArray: function (val, name) { + if (!val || val.length === 0) { + throw new TypeError('Required array argument ' + name + ' for function ' + this.func + ' is either not defined or empty'); + } + }, + + callback: function (val) { + this.exists(val, 'callback'); + this.function(val, 'callback'); + }, + + test: function (predicate, message) { + if (!predicate()) { + throw new Error(message + ' in function ' + this.func); + } + }, + + tableNameIsValid: exports.tableNameIsValid, + browserFileIsValid: exports.browserFileIsValid, + containerNameIsValid: exports.containerNameIsValid, + shareNameIsValid: exports.shareNameIsValid, + blobNameIsValid: exports.blobNameIsValid, + blobTierNameIsValid: exports.blobTierNameIsValid, + pageRangesAreValid: exports.pageRangesAreValid, + queueNameIsValid: exports.queueNameIsValid, + blobTypeIsValid: exports.blobTypeIsValid, + shareACLIsValid: exports.shareACLIsValid, + shareQuotaIsValid: exports.shareQuotaIsValid, + isValidEnumValue: exports.isValidEnumValue +}); + +function validateArgs(functionName, validationRules) { + var validator = new ArgumentValidator(functionName); + validationRules(validator); +} + +exports.ArgumentValidator = ArgumentValidator; +exports.validateArgs = validateArgs; + +/***/ }), + +/***/ 10210: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var qs = __nccwpck_require__(63477); +var url = __nccwpck_require__(57310); +var util = __nccwpck_require__(73837); +var _ = __nccwpck_require__(7404); +var extend = __nccwpck_require__(99237); + +var azureCommon = __nccwpck_require__(82187); +var BlockRangeStream = __nccwpck_require__(99032); +var Md5Wrapper = __nccwpck_require__(11007); +var PageRangeStream = __nccwpck_require__(96051); +var RangeStream = __nccwpck_require__(98664); +var azureutil = azureCommon.util; +var SR = azureCommon.SR; +var validate = azureCommon.validate; +var StorageServiceClient = azureCommon.StorageServiceClient; +var WebResource = azureCommon.WebResource; + +// Constants +var Constants = azureCommon.Constants; +var BlobConstants = Constants.BlobConstants; +var HeaderConstants = Constants.HeaderConstants; +var QueryStringConstants = Constants.QueryStringConstants; +var RequestLocationMode = Constants.RequestLocationMode; + +// Streams +var BatchOperation = azureCommon.BatchOperation; +var SpeedSummary = azureCommon.SpeedSummary; +var ChunkAllocator = azureCommon.ChunkAllocator; +var ChunkStream = azureCommon.ChunkStream; +var ChunkStreamWithStream = azureCommon.ChunkStreamWithStream; + +// Models requires +var AclResult = azureCommon.AclResult; +var ServiceStatsParser = azureCommon.ServiceStatsParser; +var AccountPropertiesResult = __nccwpck_require__(36426); +var BlockListResult = __nccwpck_require__(50440); +var BlobResult = __nccwpck_require__(3128); +var ContainerResult = __nccwpck_require__(56983); +var LeaseResult = __nccwpck_require__(96561); + +var BlobUtilities = __nccwpck_require__(89959); + +// Errors requires +var errors = __nccwpck_require__(12528); +var ArgumentError = errors.ArgumentError; +var ArgumentNullError = errors.ArgumentNullError; +var StorageError = errors.StorageError; + +/** +* Creates a new BlobService object. 
+* If no connection string or storageaccount and storageaccesskey are provided, +* the AZURE_STORAGE_CONNECTION_STRING or AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY environment variables will be used. +* @class +* The BlobService class is used to perform operations on the Microsoft Azure Blob Service. +* The Blob Service provides storage for binary large objects, and provides +* functions for working with data stored in blobs as either streams or pages of data. +* +* For more information on the Blob Service, as well as task focused information on using it in a Node.js application, see +* [How to Use the Blob Service from Node.js](http://azure.microsoft.com/en-us/documentation/articles/storage-nodejs-how-to-use-blob-storage/). +* The following defaults can be set on the blob service. +* singleBlobPutThresholdInBytes The default maximum size, in bytes, of a blob before it must be separated into blocks. +* defaultEnableReuseSocket The default boolean value to enable socket reuse when uploading local files or streams. +* If the Node.js version is lower than 0.10.x, socket reuse will always be turned off. +* defaultTimeoutIntervalInMs The default timeout interval, in milliseconds, to use for request made via the Blob service. +* defaultClientRequestTimeoutInMs The default timeout of client requests, in milliseconds, to use for the request made via the Blob service. +* defaultMaximumExecutionTimeInMs The default maximum execution time across all potential retries, for requests made via the Blob service. +* defaultLocationMode The default location mode for requests made via the Blob service. +* parallelOperationThreadCount The number of parallel operations that may be performed when uploading a blob that is greater than +* the value specified by the singleBlobPutThresholdInBytes property in size. +* useNagleAlgorithm Determines whether the Nagle algorithm is used for requests made via the Blob service; true to use the +* Nagle algorithm; otherwise, false. 
The default value is false. +* enableGlobalHttpAgent Determines whether global HTTP(s) agent is enabled; true to use Global HTTP(s) agent; otherwise, false to use +* http(s).Agent({keepAlive:true}). +* @constructor +* @extends {StorageServiceClient} +* +* @param {string} [storageAccountOrConnectionString] The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {string|object} [host] The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {string} [sas] The Shared Access Signature string. +* @param {string} [endpointSuffix] The endpoint suffix. +* @param {TokenCredential} [token] The {@link TokenCredential} object. +*/ +function BlobService(storageAccountOrConnectionString, storageAccessKey, host, sas, endpointSuffix, token) { + var storageServiceSettings = StorageServiceClient.getStorageSettings(storageAccountOrConnectionString, storageAccessKey, host, sas, endpointSuffix, token); + + BlobService['super_'].call(this, + storageServiceSettings._name, + storageServiceSettings._key, + storageServiceSettings._blobEndpoint, + storageServiceSettings._usePathStyleUri, + storageServiceSettings._sasToken, + token); + + this.defaultEnableReuseSocket = Constants.DEFAULT_ENABLE_REUSE_SOCKET; + this.singleBlobPutThresholdInBytes = BlobConstants.DEFAULT_SINGLE_BLOB_PUT_THRESHOLD_IN_BYTES; + this.parallelOperationThreadCount = Constants.DEFAULT_PARALLEL_OPERATION_THREAD_COUNT; +} + +util.inherits(BlobService, StorageServiceClient); + +// Non-class methods + +/** +* Create resource name +* @ignore +* +* @param {string} containerName Container name +* @param {string} blobName Blob name +* @return {string} The encoded resource name. 
+*/ +function createResourceName(containerName, blobName, forSAS) { + // Resource name + if (blobName && !forSAS) { + blobName = encodeURIComponent(blobName); + blobName = blobName.replace(/%2F/g, '/'); + blobName = blobName.replace(/%5C/g, '/'); + blobName = blobName.replace(/\+/g, '%20'); + } + + // return URI encoded resource name + if (blobName) { + return containerName + '/' + blobName; + } + else { + return containerName; + } +} + +// Blob service methods + +/** +* Gets the service stats for a storage account’s Blob service. +* +* @this {BlobService} +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `[result]{@link ServiceStats}` will contain the stats and +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.getServiceStats = function (optionsOrCallback, callback) { + var options; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; }); + + validate.validateArgs('getServiceStats', function (v) { + v.callback(callback); + }); + + var webResource = WebResource.get() + .withQueryOption(QueryStringConstants.COMP, 'stats') + .withQueryOption(QueryStringConstants.RESTYPE, 'service'); + + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + + var processResponseCallback = function (responseObject, next) { + responseObject.serviceStatsResult = null; + if (!responseObject.error) { + responseObject.serviceStatsResult = ServiceStatsParser.parse(responseObject.response.body.StorageServiceStats); + } + + // function to be called after all filters + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.serviceStatsResult, returnObject.response); + }; + + // call the first filter + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Gets the properties of a storage account’s Blob service, including Azure Storage Analytics. +* +* @this {BlobService} +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `[result]{@link BlobServiceProperties}` will contain the properties +* and `response` will contain information related to this operation. +*/ +BlobService.prototype.getServiceProperties = function (optionsOrCallback, callback) { + return this.getAccountServiceProperties(optionsOrCallback, callback); +}; + +/** +* Gets the properties of a storage account. +* +* @this {BlobService} +* @param {string} [container] Optional. Name of an existing container. Required when using a SAS token to a specific container or blob. +* @param {string} [blob] Optional. Name of an existing blob. Required when using a SAS token to a specific blob. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `[result]{@link ServiceProperties}` will contain the properties +* and `response` will contain information related to this operation. +*/ +BlobService.prototype.getAccountProperties = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getAccountProperties', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var webResource = WebResource.head(createResourceName(container, blob)) + .withQueryOption(QueryStringConstants.COMP, 'properties') + .withQueryOption(QueryStringConstants.RESTYPE, 'account'); + + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + + var processResponseCallback = function (responseObject, next) { + responseObject.accountPropertiesResult = null; + if (!responseObject.error) { + responseObject.accountPropertiesResult = AccountPropertiesResult.parse(responseObject.response.headers); + } + + // function to be called after all filters + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.accountPropertiesResult, returnObject.response); + }; + + // call the first filter + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); 
+}; + +/** +* Sets the properties of a storage account's Blob service, including Azure Storage Analytics. +* You can also use this operation to set the default request version for all incoming requests that do not have a version specified. +* When you set blob service properties (such as enabling soft delete), it may take up to 30 seconds to take effect. +* +* @this {BlobService} +* @param {object} serviceProperties A `[BlobServiceProperties]{@link BlobServiceProperties}` object. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise, `response` +* will contain information related to this operation. 
+*/ +BlobService.prototype.setServiceProperties = function (serviceProperties, optionsOrCallback, callback) { + return this.setAccountServiceProperties(serviceProperties, optionsOrCallback, callback); +}; + +/** +* Sets the tier of a blockblob under a blob storage account, or the tier of a pageblob under a premium storage account. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} blobTier Please see BlobUtilities.BlobTier.StandardBlobTier or BlobUtilities.BlobTier.PremiumPageBlobTier for possible values. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise, `response` +* will contain information related to this operation. 
+*/ +BlobService.prototype.setBlobTier = function (container, blob, blobTier, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setBlobTier', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.string(blobTier, 'blobTier'); + v.containerNameIsValid(container); + v.blobNameIsValid(container, blob); + v.blobTierNameIsValid(blobTier); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var resourceName = createResourceName(container, blob); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'tier') + .withHeader(HeaderConstants.ACCESS_TIER, blobTier); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Lists a segment containing a collection of container items under the specified account. +* +* @this {BlobService} +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.maxResults] Specifies the maximum number of containers to return per call to Azure storage. +* @param {string} [options.include] Include this parameter to specify that the container's metadata be returned as part of the response body. 
(allowed values: '', 'metadata') +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of `[containers]{@link ContainerResult}` and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.listContainersSegmented = function (currentToken, optionsOrCallback, callback) { + this.listContainersSegmentedWithPrefix(null /* prefix */, currentToken, optionsOrCallback, callback); +}; + +/** +* Lists a segment containing a collection of container items whose names begin with the specified prefix under the specified account. +* +* @this {BlobService} +* @param {string} prefix The prefix of the container name. 
+* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.maxResults] Specifies the maximum number of containers to return per call to Azure storage. +* @param {string} [options.include] Include this parameter to specify that the container's metadata be returned as part of the response body. (allowed values: '', 'metadata') +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. 
+* `entries` gives a list of `[containers]{@link ContainerResult}` and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.listContainersSegmentedWithPrefix = function (prefix, currentToken, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('listContainers', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.get() + .withQueryOption(QueryStringConstants.COMP, 'list') + .withQueryOption(QueryStringConstants.MAX_RESULTS, options.maxResults) + .withQueryOption(QueryStringConstants.INCLUDE, options.include); + + if (!azureutil.objectIsNull(currentToken)) { + webResource.withQueryOption(QueryStringConstants.MARKER, currentToken.nextMarker); + } + + webResource.withQueryOption(QueryStringConstants.PREFIX, prefix); + + options.requestLocationMode = azureutil.getNextListingLocationMode(currentToken); + + var processResponseCallback = function (responseObject, next) { + responseObject.listContainersResult = null; + + if (!responseObject.error) { + responseObject.listContainersResult = { + entries: null, + continuationToken: null + }; + responseObject.listContainersResult.entries = []; + + var containers = []; + + if (responseObject.response.body.EnumerationResults.Containers && responseObject.response.body.EnumerationResults.Containers.Container) { + containers = responseObject.response.body.EnumerationResults.Containers.Container; + if (!_.isArray(containers)) { + containers = [containers]; + } + } + + containers.forEach(function (currentContainer) { + var containerResult = ContainerResult.parse(currentContainer); + responseObject.listContainersResult.entries.push(containerResult); + }); + + if (responseObject.response.body.EnumerationResults.NextMarker) { + 
responseObject.listContainersResult.continuationToken = { + nextMarker: null, + targetLocation: null + }; + + responseObject.listContainersResult.continuationToken.nextMarker = responseObject.response.body.EnumerationResults.NextMarker; + responseObject.listContainersResult.continuationToken.targetLocation = responseObject.targetLocation; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.listContainersResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +// Container methods + +/** +* Checks whether or not a container exists on the service. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ContainerResult}` will contain +* the container information including `exists` boolean member. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.doesContainerExist = function (container, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('doesContainerExist', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + this._doesContainerExist(container, false, options, callback); +}; + +/** +* Creates a new container under the specified account. +* If a container with the same name already exists, the operation fails. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {string} [options.publicAccessLevel] Specifies whether data in the container may be accessed publicly and the level of access. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ContainerResult}` will contain +* the container information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.createContainer = function (container, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createContainer', function (v) { + v.string(container, 'container'); + v.test(function () { return container !== '$logs'; }, + 'Container name format is incorrect'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(container) + .withQueryOption(QueryStringConstants.RESTYPE, 'container'); + + webResource.addOptionalMetadataHeaders(options.metadata); + webResource.withHeader(HeaderConstants.BLOB_PUBLIC_ACCESS, options.publicAccessLevel); + + var processResponseCallback = function (responseObject, next) { + responseObject.containerResult = null; + if (!responseObject.error) { + responseObject.containerResult = new ContainerResult(container); + responseObject.containerResult.getPropertiesFromHeaders(responseObject.response.headers); + + if (options.metadata) { + responseObject.containerResult.metadata = options.metadata; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.containerResult, returnObject.response); + }; + + 
next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Creates a new container under the specified account if the container does not exists. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {string} [options.publicAccessLevel] Specifies whether data in the container may be accessed publicly and the level of access. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ContainerResult}` will contain +* the container information including `created` boolean member. +* `response` will contain information related to this operation. 
+* +* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* blobService.createContainerIfNotExists('taskcontainer', {publicAccessLevel : 'blob'}, function(error) { +* if(!error) { +* // Container created or exists, and is public +* } +* }); +*/ +BlobService.prototype.createContainerIfNotExists = function (container, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createContainerIfNotExists', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var self = this; + self._doesContainerExist(container, true, options, function (error, result, response) { + var exists = result.exists; + result.created = false; + delete result.exists; + + if (error) { + callback(error, result, response); + } else if (exists) { + response.isSuccessful = true; + callback(error, result, response); + } else { + self.createContainer(container, options, function (createError, containerResult, createResponse) { + if (!createError) { + containerResult.created = true; + } + else if (createError && createError.statusCode === Constants.HttpConstants.HttpResponseCodes.Conflict && createError.code === Constants.BlobErrorCodeStrings.CONTAINER_ALREADY_EXISTS) { + // If it was created before, there was no actual error. + createError = null; + createResponse.isSuccessful = true; + } + + callback(createError, containerResult, createResponse); + }); + } + }); +}; + +/** +* Retrieves a container and its properties from a specified account. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* +* @this {BlobService} +* @param {string} container The container name. 
+* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {string} [options.leaseId] The container lease identifier. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ContainerResult}` will contain +* information for the container. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.getContainerProperties = function (container, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getContainerProperties', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.head(container) + .withQueryOption(QueryStringConstants.RESTYPE, 'container') + .withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.containerResult = null; + if (!responseObject.error) { + responseObject.containerResult = new ContainerResult(container); + responseObject.containerResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.containerResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.containerResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Returns all user-defined metadata for the container. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The container lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ContainerResult}` will contain +* information for the container. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.getContainerMetadata = function (container, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getContainerMetadata', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.head(container) + .withQueryOption(QueryStringConstants.RESTYPE, 'container') + .withQueryOption(QueryStringConstants.COMP, 'metadata') + .withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.containerResult = null; + if (!responseObject.error) { + responseObject.containerResult = new ContainerResult(container); + responseObject.containerResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.containerResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.containerResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Sets the container's metadata. +* +* Calling the Set Container Metadata operation overwrites all existing metadata that is associated with the container. +* It's not possible to modify an individual name/value pair. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {object} metadata The metadata key/value pairs. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The container lease identifier. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.setContainerMetadata = function (container, metadata, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setContainerMetadata', function (v) { + v.string(container, 'container'); + v.object(metadata, 'metadata'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(container) + .withQueryOption(QueryStringConstants.RESTYPE, 'container') + .withQueryOption(QueryStringConstants.COMP, 'metadata') + .withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + webResource.addOptionalMetadataHeaders(metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.containerResult = null; + if (!responseObject.error) { + responseObject.containerResult = new ContainerResult(container); + responseObject.containerResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.containerResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Gets the container's ACL. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The container lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ContainerAclResult}` will contain +* information for the container. +* `response` will contain information related to this operation. 
+*/
+BlobService.prototype.getContainerAcl = function (container, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('getContainerAcl', function (v) {
+    v.string(container, 'container');
+    v.containerNameIsValid(container);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  var webResource = WebResource.get(container)
+    .withQueryOption(QueryStringConstants.RESTYPE, 'container')
+    .withQueryOption(QueryStringConstants.COMP, 'acl')
+    .withHeader(HeaderConstants.LEASE_ID, options.leaseId);
+
+  options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY;
+
+  var processResponseCallback = function (responseObject, next) {
+    responseObject.containerResult = null;
+    if (!responseObject.error) {
+      responseObject.containerResult = new ContainerResult(container);
+      responseObject.containerResult.getPropertiesFromHeaders(responseObject.response.headers);
+      responseObject.containerResult.signedIdentifiers = AclResult.parse(responseObject.response.body);
+    }
+
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.containerResult, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, null, options, processResponseCallback);
+};
+
+/**
+* Updates the container's ACL.
+*
+* @this {BlobService}
+* @param {string} container The container name.
+* @param {Object.<string, AccessPolicy>} signedIdentifiers The container ACL settings. See `[AccessPolicy]{@link AccessPolicy}` for detailed information.
+* @param {object} [options] The request options.
+* @param {AccessConditions} [options.accessConditions] The access conditions.
+* @param {string} [options.publicAccessLevel] Specifies whether data in the container may be accessed publicly and the level of access.
+* @param {string} [options.leaseId] The container lease identifier.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ContainerAclResult}` will contain +* information for the container. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.setContainerAcl = function (container, signedIdentifiers, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setContainerAcl', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var policies = null; + if (signedIdentifiers) { + if (_.isArray(signedIdentifiers)) { + throw new TypeError(SR.INVALID_SIGNED_IDENTIFIERS); + } + policies = AclResult.serialize(signedIdentifiers); + } + + var webResource = WebResource.put(container) + .withQueryOption(QueryStringConstants.RESTYPE, 'container') + .withQueryOption(QueryStringConstants.COMP, 'acl') + .withHeader(HeaderConstants.CONTENT_LENGTH, !azureutil.objectIsNull(policies) ? Buffer.byteLength(policies) : 0) + .withHeader(HeaderConstants.BLOB_PUBLIC_ACCESS, options.publicAccessLevel) + .withHeader(HeaderConstants.LEASE_ID, options.leaseId) + .withBody(policies); + + var processResponseCallback = function (responseObject, next) { + responseObject.containerResult = null; + if (!responseObject.error) { + responseObject.containerResult = new ContainerResult(container, options.publicAccessLevel); + responseObject.containerResult.getPropertiesFromHeaders(responseObject.response.headers); + if (signedIdentifiers) { + responseObject.containerResult.signedIdentifiers = signedIdentifiers; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.containerResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, webResource.body, options, processResponseCallback); +}; + +/** +* Marks the specified container for deletion. +* The container and any blobs contained within it are later deleted during garbage collection. 
+* +* @this {BlobService} +* @param {string} container The container name. +* @param {object} [options] The request options. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {string} [options.leaseId] The container lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.deleteContainer = function (container, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteContainer', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.del(container) + .withQueryOption(QueryStringConstants.RESTYPE, 'container') + .withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Marks the specified container for deletion if it exists. +* The container and any blobs contained within it are later deleted during garbage collection. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {object} [options] The request options. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {string} [options.leaseId] The container lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `result` will
+* be true if the container exists and was deleted, or false if the container
+* did not exist.
+* `response` will contain information related to this operation.
+*/
+BlobService.prototype.deleteContainerIfExists = function (container, optionsOrCallback, callback) {
+ var userOptions;
+ azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+ validate.validateArgs('deleteContainerIfExists', function (v) {
+ v.string(container, 'container');
+ v.containerNameIsValid(container);
+ v.callback(callback);
+ });
+
+ var options = extend(true, {}, userOptions);
+ var self = this;
+ self._doesContainerExist(container, true, options, function (error, result, response) {
+ if (error) {
+ callback(error, result.exists, response);
+ } else if (!result.exists) {
+ response.isSuccessful = true;
+ callback(error, false, response);
+ } else {
+ self.deleteContainer(container, options, function (deleteError, deleteResponse) {
+ var deleted;
+ if (!deleteError) {
+ deleted = true;
+ } else if (deleteError && deleteError.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound && deleteError.code === Constants.BlobErrorCodeStrings.CONTAINER_NOT_FOUND) {
+ // If it was deleted already, there was no actual error.
+ deleted = false;
+ deleteError = null;
+ deleteResponse.isSuccessful = true;
+ }
+
+ callback(deleteError, deleted, deleteResponse);
+ });
+ }
+ });
+};
+
+/**
+* Lists a segment containing a collection of blob directory items in the container.
+*
+* @this {BlobService}
+* @param {string} container The container name.
+* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation.
+* @param {object} [options] The request options.
+* @param {int} [options.maxResults] Specifies the maximum number of directories to return per call to Azure ServiceClient. This does NOT affect list size returned by this function. (maximum: 5000)
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of `[directories]{@link DirectoryResult}` and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.listBlobDirectoriesSegmented = function (container, currentToken, optionsOrCallback, callback) { + this.listBlobDirectoriesSegmentedWithPrefix(container, null /* prefix */, currentToken, optionsOrCallback, callback); +}; + +/** +* Lists a segment containing a collection of blob directory items in the container. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} prefix The prefix of the blob directory. +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {int} [options.maxResults] Specifies the maximum number of directories to return per call to Azure ServiceClient. This does NOT affect list size returned by this function. (maximum: 5000) +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`.
+* `entries` gives a list of `[directories]{@link DirectoryResult}` and the `continuationToken` is used for the next listing operation.
+* `response` will contain information related to this operation.
+*/
+BlobService.prototype.listBlobDirectoriesSegmentedWithPrefix = function (container, prefix, currentToken, optionsOrCallback, callback) {
+ var userOptions;
+ azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+ var options = extend(true, {}, userOptions, { delimiter: '/' });
+
+ this._listBlobsOrDirectoriesSegmentedWithPrefix(container, prefix, currentToken, BlobConstants.ListBlobTypes.Directory, options, callback);
+};
+
+/**
+* Lists a segment containing a collection of blob items in the container.
+*
+* @this {BlobService}
+* @param {string} container The container name.
+* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation.
+* @param {object} [options] The request options.
+* @param {string} [options.delimiter] Delimiter, i.e. '/', for specifying folder hierarchy.
+* @param {int} [options.maxResults] Specifies the maximum number of blobs to return per call to Azure ServiceClient. (maximum: 5000)
+* @param {string} [options.include] Specifies that the response should include one or more of the following subsets: '', 'metadata', 'snapshots', 'uncommittedblobs', 'copy', 'deleted').
+* Please find these values in BlobUtilities.BlobListingDetails. Multiple values can be added separated with a comma (,). +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of `[blobs]{@link BlobResult}` and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.listBlobsSegmented = function (container, currentToken, optionsOrCallback, callback) { + this.listBlobsSegmentedWithPrefix(container, null /* prefix */, currentToken, optionsOrCallback, callback); +}; + +/** +* Lists a segment containing a collection of blob items whose names begin with the specified prefix in the container. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} prefix The prefix of the blob name. +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {string} [options.delimiter] Delimiter, i.e. '/', for specifying folder hierarchy. +* @param {int} [options.maxResults] Specifies the maximum number of blobs to return per call to Azure ServiceClient. (maximum: 5000) +* @param {string} [options.include] Specifies that the response should include one or more of the following subsets: '', 'metadata', 'snapshots', 'uncommittedblobs', 'copy', 'deleted'). +* Please find these values in BlobUtilities.BlobListingDetails. Multiple values can be added separated with a comma (,). +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the entries of `[blobs]{@link BlobResult}` and the continuation token for the next listing operation. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.listBlobsSegmentedWithPrefix = function (container, prefix, currentToken, optionsOrCallback, callback) { + this._listBlobsOrDirectoriesSegmentedWithPrefix(container, prefix, currentToken, BlobConstants.ListBlobTypes.Blob, optionsOrCallback, callback); +}; + +// Lease methods + +/** +* Acquires a new lease. If container and blob are specified, acquires a blob lease. Otherwise, if only container is specified and blob is null, acquires a container lease. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {string} [options.leaseDuration] The lease duration in seconds. A non-infinite lease can be between 15 and 60 seconds. Default is never to expire. +* @param {string} [options.proposedLeaseId] The proposed lease identifier. Must be a GUID. +* @param {AccessConditions} [options.accessConditions] The access conditions. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link LeaseResult}` will contain +* the lease information. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.acquireLease = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('acquireLease', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + if (!options.leaseDuration) { + options.leaseDuration = -1; + } + + this._leaseImpl(container, blob, null /* leaseId */, BlobConstants.LeaseOperation.ACQUIRE, options, callback); +}; + +/** +* Renews an existing lease. If container and blob are specified, renews the blob lease. Otherwise, if only container is specified and blob is null, renews the container lease. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} leaseId The lease identifier. Must be a GUID. +* @param {object} [options] The request options. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. 
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link LeaseResult}` will contain +* the lease information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.renewLease = function (container, blob, leaseId, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('renewLease', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + this._leaseImpl(container, blob, leaseId, BlobConstants.LeaseOperation.RENEW, options, callback); +}; + +/** +* Changes the lease ID of an active lease. If container and blob are specified, changes the blob lease. Otherwise, if only container is specified and blob is null, changes the +* container lease. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} leaseId The current lease identifier. +* @param {string} proposedLeaseId The proposed lease identifier. Must be a GUID. +* @param {object} [options] The request options. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link LeaseResult}` will contain the lease information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.changeLease = function (container, blob, leaseId, proposedLeaseId, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('changeLease', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + options.proposedLeaseId = proposedLeaseId; + this._leaseImpl(container, blob, leaseId, BlobConstants.LeaseOperation.CHANGE, options, callback); +}; + +/** +* Releases the lease. If container and blob are specified, releases the blob lease. Otherwise, if only container is specified and blob is null, releases the container lease. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} leaseId The lease identifier. +* @param {object} [options] The request options. +* @param {AccessConditions} [options.accessConditions] The access conditions. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link LeaseResult}` will contain +* the lease information. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.releaseLease = function (container, blob, leaseId, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('releaseLease', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + this._leaseImpl(container, blob, leaseId, BlobConstants.LeaseOperation.RELEASE, options, callback); +}; + +/** +* Breaks the lease but ensures that another client cannot acquire a new lease until the current lease period has expired. If container and blob are specified, breaks the blob lease. +* Otherwise, if only container is specified and blob is null, breaks the container lease. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {int} [options.leaseBreakPeriod] The lease break period, between 0 and 60 seconds. If unspecified, a fixed-duration lease breaks after +* the remaining lease period elapses, and an infinite lease breaks immediately. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link LeaseResult}` will contain +* the lease information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.breakLease = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('breakLease', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + this._leaseImpl(container, blob, null /*leaseId*/, BlobConstants.LeaseOperation.BREAK, options, callback); +}; + +// Blob methods + +/** +* Returns all user-defined metadata, standard HTTP properties, and system properties for the blob. +* It does not return or modify the content of the blob. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. 
+* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* information about the blob. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.getBlobProperties = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getBlobProperties', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(container, blob); + var webResource = WebResource.head(resourceName); + + if (options.snapshotId) { + webResource.withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId); + } + + BlobResult.setHeadersFromBlob(webResource, options); + + options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = null; + if (!responseObject.error) { + responseObject.blobResult = new BlobResult(container, blob); + responseObject.blobResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Returns all user-defined metadata for the specified blob or snapshot. +* It does not modify or return the content of the blob. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. 
+* @param {object} [options] The request options. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* information about the blob. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.getBlobMetadata = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getBlobMetadata', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(container, blob); + var webResource = WebResource.head(resourceName); + + webResource.withQueryOption(QueryStringConstants.COMP, 'metadata'); + webResource.withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId); + + BlobResult.setHeadersFromBlob(webResource, options); + + options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = null; + if (!responseObject.error) { + responseObject.blobResult = new BlobResult(container, blob); + responseObject.blobResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Sets user-defined properties for the specified blob or snapshot. +* It does not modify or return the content of the blob. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [properties] The blob properties to set. +* @param {string} [properties.contentType] The MIME content type of the blob. The default type is application/octet-stream. 
+* @param {string} [properties.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [properties.contentLanguage] The natural languages used by this resource. +* @param {string} [properties.cacheControl] The blob's cache control. +* @param {string} [properties.contentDisposition] The blob's content disposition. +* @param {string} [properties.contentMD5] The blob's MD5 hash. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* information about the blob. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.setBlobProperties = function (container, blob, properties, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setBlobProperties', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, { contentSettings: properties }, userOptions); + var resourceName = createResourceName(container, blob); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'properties'); + + BlobResult.setPropertiesFromBlob(webResource, options); + + this._setBlobPropertiesHelper({ + webResource: webResource, + options: options, + container: container, + blob: blob, + callback: callback + }); +}; + +/** +* Sets user-defined metadata for the specified blob or snapshot as one or more name-value pairs +* It does not modify or return the content of the blob. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} metadata The metadata key/value pairs. +* @param {object} [options] The request options. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* information on the blob. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.setBlobMetadata = function (container, blob, metadata, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setBlobMetadata', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.object(metadata, 'metadata'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(container, blob); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'metadata'); + + webResource.withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId); + + options.metadata = metadata; + BlobResult.setHeadersFromBlob(webResource, options); + + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = null; + if (!responseObject.error) { + responseObject.blobResult = new BlobResult(container, blob); + 
responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + + +/** +* Provides a stream to read from a blob. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.rangeStart] Return only the bytes of the blob in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the blob in the specified range. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading blobs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link BlobResult}` will contain the blob information. +* `response` will contain information related to this operation. +* @return {Readable} A Node.js Readable stream. +* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* var writable = fs.createWriteStream(destinationFileNameTarget); +* blobService.createReadStream(containerName, blobName).pipe(writable); +*/ +BlobService.prototype.createReadStream = function (container, blob, optionsOrCallback, callback) { + var options; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; }); + + validate.validateArgs('createReadStream', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + }); + + var readStream = new ChunkStream(); + this.getBlobToStream(container, blob, readStream, options, function (error, responseBlob, response) { + if (error) { + readStream.emit('error', error); + } + + if (callback) { + callback(error, responseBlob, response); + } + }); + + return readStream; +}; + +/** +* Downloads a blob into a stream. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {Writable} writeStream The Node.js Writable stream. +* @param {object} [options] The request options. +* @param {boolean} [options.skipSizeCheck] Skip the size check to perform direct download. 
+*                                                                     Set the option to true for small blobs.
+*                                                                     Parallel download and speed summary won't work with this option on.
+* @param {SpeedSummary} [options.speedSummary]                        The download tracker objects.
+* @param {int}               [options.parallelOperationThreadCount]   The number of parallel operations that may be performed when downloading.
+* @param {string}            [options.snapshotId]                     The snapshot identifier.
+* @param {string}            [options.leaseId]                        The lease identifier.
+* @param {string}            [options.rangeStart]                     Return only the bytes of the blob in the specified range.
+* @param {string}            [options.rangeEnd]                       Return only the bytes of the blob in the specified range.
+* @param {boolean}           [options.useTransactionalMD5]            When set to true, Calculate and send/validate content MD5 for transactions.
+* @param {boolean}           [options.disableContentMD5Validation]    When set to true, MD5 validation will be disabled when downloading blobs.
+* @param {AccessConditions}  [options.accessConditions]               The access conditions.
+* @param {LocationMode}      [options.locationMode]                   Specifies the location mode used to decide which location the request should be sent to.
+*                                                                     Please see StorageUtilities.LocationMode for the possible values.
+* @param {int}               [options.timeoutIntervalInMs]            The server timeout interval, in milliseconds, to use for the request.
+* @param {int}               [options.clientRequestTimeoutInMs]       The timeout of client requests, in milliseconds, to use for the request.
+* @param {int}               [options.maximumExecutionTimeInMs]       The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+*                                                                     The maximum execution time interval begins at the time that the client begins building the request. The maximum
+*                                                                     execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string}            [options.clientRequestId]                A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link BlobResult}` will contain the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +* +* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* blobService.getBlobToStream('taskcontainer', 'task1', fs.createWriteStream('task1-download.txt'), function(error, serverBlob) { +* if(!error) { +* // Blob available in serverBlob.blob variable +* } +* }); +*/ +BlobService.prototype.getBlobToStream = function (container, blob, writeStream, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + userOptions.speedSummary = userOptions.speedSummary || new SpeedSummary(blob); + + validate.validateArgs('getBlobToStream', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.object(writeStream, 'writeStream'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var propertiesRequestOptions = { + timeoutIntervalInMs: options.timeoutIntervalInMs, + clientRequestTimeoutInMs: options.clientRequestTimeoutInMs, + snapshotId: options.snapshotId, + accessConditions: options.accessConditions + }; + + if (options.skipSizeCheck) { + this._getBlobToStream(container, blob, writeStream, options, callback); + } else { + var self = this; + this.getBlobProperties(container, blob, propertiesRequestOptions, function (error, properties) { + if (error) { + callback(error); + } else { + var size; + if (options.rangeStart) { + var endOffset = properties.contentLength - 1; + var end = options.rangeEnd ? 
Math.min(options.rangeEnd, endOffset) : endOffset; + size = end - options.rangeStart + 1; + } else { + size = properties.contentLength; + } + options.speedSummary.totalSize = size; + + if (size > self.singleBlobPutThresholdInBytes) { + azureutil.setObjectInnerPropertyValue(options, ['contentSettings', 'contentMD5'], azureutil.tryGetValueChain(properties, ['contentSettings', 'contentMD5'], null)); + self._getBlobToRangeStream(container, blob, properties.blobType, writeStream, options, callback); + } else { + self._getBlobToStream(container, blob, writeStream, options, callback); + } + } + }); + } + + return options.speedSummary; +}; + +/** +* Downloads a blob into a text string. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.rangeStart] Return only the bytes of the blob in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the blob in the specified range. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading blobs. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {BlobService~blobToText} callback `error` will contain information +* if an error occurs; otherwise `text` will contain the blob contents, +* and `[blockBlob]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.getBlobToText = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getBlobToText', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(container, blob); + var webResource = WebResource.get(resourceName) + .withRawResponse(); + + webResource.withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId); + + BlobResult.setHeadersFromBlob(webResource, options); + this._setRangeContentMD5Header(webResource, options); + + options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.text = null; + responseObject.blobResult = null; + + if 
(!responseObject.error) { + responseObject.blobResult = new BlobResult(container, blob); + responseObject.blobResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + responseObject.text = responseObject.response.body; + + self._validateLengthAndMD5(options, responseObject); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.text, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +* If a blob has snapshots, you must delete them when deleting the blob. Using the deleteSnapshots option, you can choose either to delete both the blob and its snapshots, +* or to delete only the snapshots but not the blob itself. If the blob has snapshots, you must include the deleteSnapshots option or the blob service will return an error +* and nothing will be deleted. +* If you are deleting a specific snapshot using the snapshotId option, the deleteSnapshots option must NOT be included. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {string} [options.deleteSnapshots] The snapshot delete option. See azure.BlobUtilities.SnapshotDeleteOptions.*. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; `response` will contain information related to this operation. 
+*/ +BlobService.prototype.deleteBlob = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteBlob', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(container, blob); + var webResource = WebResource.del(resourceName) + .withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + if (!azureutil.objectIsNull(options.snapshotId) && !azureutil.objectIsNull(options.deleteSnapshots)) { + throw new ArgumentError('options', SR.INVALID_DELETE_SNAPSHOT_OPTION); + } + + webResource.withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId); + webResource.withHeader(HeaderConstants.DELETE_SNAPSHOT, options.deleteSnapshots); + + BlobResult.setHeadersFromBlob(webResource, options); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* The undelete Blob operation restores the contents and metadata of soft deleted blob or snapshot. +* Attempting to undelete a blob or snapshot that is not soft deleted will succeed without any changes. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; `response` will contain information related to this operation. 
+*/
+BlobService.prototype.undeleteBlob = function (container, blob, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('undeleteBlob', function (v) {
+    v.string(container, 'container');
+    v.string(blob, 'blob');
+    v.containerNameIsValid(container);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  var resourceName = createResourceName(container, blob);
+  var webResource = WebResource.put(resourceName)
+    .withQueryOption(QueryStringConstants.COMP, 'undelete');
+
+  BlobResult.setHeadersFromBlob(webResource, options);
+
+  var processResponseCallback = function (responseObject, next) {
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, null, options, processResponseCallback);
+};
+
+/**
+* Checks whether or not a blob exists on the service.
+*
+* @this {BlobService}
+* @param {string}             container                               The container name.
+* @param {string}             blob                                    The blob name.
+* @param {object}             [options]                               The request options.
+* @param {string}             [options.snapshotId]                    The snapshot identifier.
+* @param {string}             [options.leaseId]                       The lease identifier.
+* @param {LocationMode}       [options.locationMode]                  Specifies the location mode used to decide which location the request should be sent to.
+*                                                                     Please see StorageUtilities.LocationMode for the possible values.
+* @param {int}                [options.timeoutIntervalInMs]           The server timeout interval, in milliseconds, to use for the request.
+* @param {int}                [options.clientRequestTimeoutInMs]      The timeout of client requests, in milliseconds, to use for the request.
+* @param {int}                [options.maximumExecutionTimeInMs]      The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, result, response)} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information including the `exists` boolean member. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.doesBlobExist = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('doesBlobExist', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + this._doesBlobExist(container, blob, false, options, callback); +}; + +/** +* Marks the specified blob or snapshot for deletion if it exists. The blob is later deleted during garbage collection. +* If a blob has snapshots, you must delete them when deleting the blob. Using the deleteSnapshots option, you can choose either to delete both the blob and its snapshots, +* or to delete only the snapshots but not the blob itself. If the blob has snapshots, you must include the deleteSnapshots option or the blob service will return an error +* and nothing will be deleted. +* If you are deleting a specific snapshot using the snapshotId option, the deleteSnapshots option must NOT be included. 
+* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {string} [options.deleteSnapshots] The snapshot delete option. See azure.BlobUtilities.SnapshotDeleteOptions.*. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will +* be true if the blob was deleted, or false if the blob +* does not exist. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.deleteBlobIfExists = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteBlobIfExists', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var self = this; + self._doesBlobExist(container, blob, true, options, function (error, existsResult, response) { + if (error) { + callback(error, existsResult.exists, response); + } else if (!existsResult.exists) { + response.isSuccessful = true; + callback(error, false, response); + } else { + self.deleteBlob(container, blob, options, function (deleteError, deleteResponse) { + var deleted; + if (!deleteError) { + deleted = true; + } else if (deleteError && deleteError.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound && deleteError.code === Constants.BlobErrorCodeStrings.BLOB_NOT_FOUND) { + // If it was deleted already, there was no actual error. + deleted = false; + deleteError = null; + deleteResponse.isSuccessful = true; + } + + callback(deleteError, deleted, deleteResponse); + }); + } + }); +}; + +/** +* Creates a read-only snapshot of a blob. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {string} [options.leaseId] The lease identifier. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the ID of the snapshot. +* `response` will contain information related to this operation. 
+*/
+BlobService.prototype.createBlobSnapshot = function (container, blob, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('createBlobSnapshot', function (v) {
+    v.string(container, 'container');
+    v.string(blob, 'blob');
+    v.containerNameIsValid(container);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  var resourceName = createResourceName(container, blob);
+  var webResource = WebResource.put(resourceName)
+    .withQueryOption(QueryStringConstants.COMP, 'snapshot');
+
+  BlobResult.setHeadersFromBlob(webResource, options);
+
+  var processResponseCallback = function (responseObject, next) {
+    responseObject.snapshotId = null;
+    if (!responseObject.error) {
+      responseObject.snapshotId = responseObject.response.headers[HeaderConstants.SNAPSHOT];
+    }
+
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.snapshotId, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, null, options, processResponseCallback);
+};
+
+/**
+* Starts to copy a blob or an Azure Storage file to a destination blob.
+*
+* For an asynchronous copy (by default), this operation returns an object including a copy ID which
+* you can use to check or abort the copy operation. The Blob service copies blobs on a best-effort basis.
+* The source blob for an asynchronous copy operation may be a block blob, an append blob,
+* a page blob or an Azure Storage file.
+*
+* Refer to https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob for more details.
+*
+* @this {BlobService}
+* @param {string} sourceUri The source blob URI.
+* @param {string} targetContainer The target container name.
+* @param {string} targetBlob The target blob name.
+* @param {object} [options] The request options.
+* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. +* @param {boolean} [options.isIncrementalCopy] If it's incremental copy or not. Refer to https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob +* @param {string} [options.snapshotId] The source blob snapshot identifier. +* @param {object} [options.metadata] The target blob metadata key/value pairs. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.sourceLeaseId] The source blob lease identifier. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {AccessConditions} [options.sourceAccessConditions] The source access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.startCopyBlob = function (sourceUri, targetContainer, targetBlob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('startCopyBlob', function (v) { + v.string(sourceUri, 'sourceUri'); + v.string(targetContainer, 'targetContainer'); + v.string(targetBlob, 'targetBlob'); + v.containerNameIsValid(targetContainer); + v.callback(callback); + }); + + var targetResourceName = createResourceName(targetContainer, targetBlob); + + var options = extend(true, {}, userOptions); + + if (options.snapshotId) { + var uri = url.parse(sourceUri, true); + if (uri.query['snapshot']) { + throw new ArgumentError('options.snapshotId', 'Duplicate snapshot supplied in both the source uri and option.'); + } + + uri.search = undefined; + uri.query['snapshot'] = options.snapshotId; + + sourceUri = url.format(uri); + } + + var webResource = WebResource.put(targetResourceName) + .withHeader(HeaderConstants.COPY_SOURCE, sourceUri); + + if (options.isIncrementalCopy) { + webResource.withQueryOption(QueryStringConstants.COMP, 'incrementalcopy'); + } + + webResource.withHeader(HeaderConstants.ACCESS_TIER, options.blobTier); + webResource.withHeader(HeaderConstants.LEASE_ID, options.leaseId); + webResource.withHeader(HeaderConstants.SOURCE_LEASE_ID, options.sourceLeaseId); + webResource.addOptionalMetadataHeaders(options.metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = null; + if (!responseObject.error) { + responseObject.blobResult = new BlobResult(targetContainer, targetBlob); + 
responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + + if (options.metadata) { + responseObject.blobResult.metadata = options.metadata; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Abort a blob copy operation. +* +* @this {BlobService} +* @param {string} container The destination container name. +* @param {string} blob The destination blob name. +* @param {string} copyId The copy operation identifier. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResponse} callback `error` will contain information if an error occurs; +* `response` will contain information related to this operation. +*/ +BlobService.prototype.abortCopyBlob = function (container, blob, copyId, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('abortCopyBlob', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var resourceName = createResourceName(container, blob); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COPY_ID, copyId) + .withQueryOption(QueryStringConstants.COMP, 'copy') + .withHeader(HeaderConstants.COPY_ACTION, 'abort'); + + webResource.withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Retrieves a shared access signature token. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} [blob] The blob name. +* @param {object} sharedAccessPolicy The shared access policy. +* @param {string} [sharedAccessPolicy.Id] The signed identifier. +* @param {object} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type. +* @param {date|string} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid (The UTC value will be used). +* @param {date|string} [sharedAccessPolicy.AccessPolicy.Expiry] The time at which the Shared Access Signature becomes expired (The UTC value will be used). 
+* @param {string} [sharedAccessPolicy.AccessPolicy.IPAddressOrRange] An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive.
+* @param {string} [sharedAccessPolicy.AccessPolicy.Protocols] The protocols permitted for a request made with the account SAS.
+* Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http.
+* @param {object} [headers] The optional header values to set for a blob returned with this SAS.
+* @param {string} [headers.cacheControl] The optional value of the Cache-Control response header to be returned when this SAS is used.
+* @param {string} [headers.contentType] The optional value of the Content-Type response header to be returned when this SAS is used.
+* @param {string} [headers.contentEncoding] The optional value of the Content-Encoding response header to be returned when this SAS is used.
+* @param {string} [headers.contentLanguage] The optional value of the Content-Language response header to be returned when this SAS is used.
+* @param {string} [headers.contentDisposition] The optional value of the Content-Disposition response header to be returned when this SAS is used.
+* @return {string} The shared access signature query string. Note this string does not contain the leading "?".
+*/
+BlobService.prototype.generateSharedAccessSignature = function (container, blob, sharedAccessPolicy, headers) {
+  // check if the BlobService is able to generate a shared access signature
+  if (!this.storageCredentials) {
+    throw new ArgumentNullError('storageCredentials');
+  }
+
+  if (!this.storageCredentials.generateSignedQueryString) {
+    throw new ArgumentError('storageCredentials', SR.CANNOT_CREATE_SAS_WITHOUT_ACCOUNT_KEY);
+  }
+
+  // Validate container name. Blob name is optional.
+ validate.validateArgs('generateSharedAccessSignature', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.object(sharedAccessPolicy, 'sharedAccessPolicy'); + }); + + var resourceType = BlobConstants.ResourceTypes.CONTAINER; + if (blob) { + validate.validateArgs('generateSharedAccessSignature', function (v) { + v.string(blob, 'blob'); + }); + resourceType = BlobConstants.ResourceTypes.BLOB; + } + + if (sharedAccessPolicy.AccessPolicy) { + if (!azureutil.objectIsNull(sharedAccessPolicy.AccessPolicy.Start)) { + if (!_.isDate(sharedAccessPolicy.AccessPolicy.Start)) { + sharedAccessPolicy.AccessPolicy.Start = new Date(sharedAccessPolicy.AccessPolicy.Start); + } + + sharedAccessPolicy.AccessPolicy.Start = azureutil.truncatedISO8061Date(sharedAccessPolicy.AccessPolicy.Start); + } + + if (!azureutil.objectIsNull(sharedAccessPolicy.AccessPolicy.Expiry)) { + if (!_.isDate(sharedAccessPolicy.AccessPolicy.Expiry)) { + sharedAccessPolicy.AccessPolicy.Expiry = new Date(sharedAccessPolicy.AccessPolicy.Expiry); + } + + sharedAccessPolicy.AccessPolicy.Expiry = azureutil.truncatedISO8061Date(sharedAccessPolicy.AccessPolicy.Expiry); + } + } + + var resourceName = createResourceName(container, blob, true); + return this.storageCredentials.generateSignedQueryString(Constants.ServiceType.Blob, resourceName, sharedAccessPolicy, null, { headers: headers, resourceType: resourceType }); +}; + +/** +* Retrieves a blob or container URL. +* +* @param {string} container The container name. +* @param {string} [blob] The blob name. +* @param {string} [sasToken] The Shared Access Signature token. +* @param {boolean} [primary] A boolean representing whether to use the primary or the secondary endpoint. +* @param {string} [snapshotId] The snapshot identifier. +* @return {string} The formatted URL string. 
+* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* var sharedAccessPolicy = { +* AccessPolicy: { +* Permissions: azure.BlobUtilities.SharedAccessPermissions.READ, +* Start: startDate, +* Expiry: expiryDate +* }, +* }; +* +* var sasToken = blobService.generateSharedAccessSignature(containerName, blobName, sharedAccessPolicy); +* var sasUrl = blobService.getUrl(containerName, blobName, sasToken); +*/ +BlobService.prototype.getUrl = function (container, blob, sasToken, primary, snapshotId) { + validate.validateArgs('getUrl', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + }); + + var host; + if (!azureutil.objectIsNull(primary) && primary === false) { + host = this.host.secondaryHost; + } else { + host = this.host.primaryHost; + } + + host = azureutil.trimPortFromUri(host); + if (host && host.lastIndexOf('/') !== (host.length - 1)) { + host = host + '/'; + } + + var query = qs.parse(sasToken); + if (snapshotId) { + query[QueryStringConstants.SNAPSHOT] = snapshotId; + } + + var fullPath = url.format({ pathname: this._getPath(createResourceName(container, blob)), query: query }); + return url.resolve(host, fullPath); +}; + +// Page blob methods + +/** +* Creates a page blob of the specified length. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {int} length The length of the page blob in bytes. +* @param {object} [options] The request options. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. 
Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The MD5 hash of the blob content. +* @param {string} [options.sequenceNumber] The blob's sequence number. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. +*/ +BlobService.prototype.createPageBlob = function (container, blob, length, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createPageBlob', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.value(length, 'length'); + v.callback(callback); + }); + + if (length && length % BlobConstants.PAGE_SIZE !== 0) { + throw new RangeError(SR.INVALID_PAGE_BLOB_LENGTH); + } + + var options = extend(true, {}, userOptions); + + var resourceName = createResourceName(container, blob); + + var webResource = WebResource.put(resourceName) + .withHeader(HeaderConstants.BLOB_TYPE, BlobConstants.BlobTypes.PAGE) + .withHeader(HeaderConstants.BLOB_CONTENT_LENGTH, length) + .withHeader(HeaderConstants.CONTENT_LENGTH, 0) + .withHeader(HeaderConstants.ACCESS_TIER, options.blobTier) + .withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + BlobResult.setHeadersFromBlob(webResource, options); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Uploads a page blob from a stream. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. 
+*
+* @this {BlobService}
+* @param {string} container The container name.
+* @param {string} blob The blob name.
+* @param {Stream} stream Stream to the data to store.
+* @param {int} streamLength The length of the stream to upload.
+* @param {object} [options] The request options.
+* @param {SpeedSummary} [options.speedSummary] The download tracker objects;
+* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading.
+* @param {string} [options.leaseId] The lease identifier.
+* @param {string} [options.transactionalContentMD5] An MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport.
+* @param {object} [options.metadata] The metadata key/value pairs.
+* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads.
+* The default value is false for page blobs.
+* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions.
+* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier.
+* @param {object} [options.contentSettings] The content settings of the blob.
+* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream.
+* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob.
+* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource.
+* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it.
+* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition.
+* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash.
+* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.createPageBlobFromStream = function (container, blob, stream, streamLength, optionsOrCallback, callback) { + return this._createBlobFromStream(container, blob, BlobConstants.BlobTypes.PAGE, stream, streamLength, optionsOrCallback, callback); +}; + +/** +* Provides a stream to write to a page blob. Assumes that the blob exists. +* If it does not, please create the blob using createPageBlob before calling this method or use createWriteStreamNewPageBlob. 
+* Please note the `Stream` returned by this API should be used with piping. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. +* The default value is false for page blobs and true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {Writable} A Node.js Writable stream. +* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* blobService.createPageBlob(containerName, blobName, 1024, function (err) { +* // Pipe file to a blob +* var stream = fs.createReadStream(fileNameTarget).pipe(blobService.createWriteStreamToExistingPageBlob(containerName, blobName)); +* }); +*/ +BlobService.prototype.createWriteStreamToExistingPageBlob = function (container, blob, optionsOrCallback, callback) { + return this._createWriteStreamToBlob(container, blob, BlobConstants.BlobTypes.PAGE, 0, false, optionsOrCallback, callback); +}; + +/** +* Provides a stream to write to a page blob. Creates the blob before writing data. If the blob already exists on the service, it will be overwritten. 
+* Please note the `Stream` returned by this API should be used with piping. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} length The blob length. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. +* The default value is false for page blobs and true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {Writable} A Node.js Writable stream. 
+* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* blobService.createPageBlob(containerName, blobName, 1024, function (err) { +* // Pipe file to a blob +* var stream = fs.createReadStream(fileNameTarget).pipe(blobService.createWriteStreamToNewPageBlob(containerName, blobName)); +* }); +*/ +BlobService.prototype.createWriteStreamToNewPageBlob = function (container, blob, length, optionsOrCallback, callback) { + return this._createWriteStreamToBlob(container, blob, BlobConstants.BlobTypes.PAGE, length, true, optionsOrCallback, callback); +}; + +/** +* Updates a page blob from a stream. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {Readable} readStream The Node.js Readable stream. +* @param {int} rangeStart The range start. +* @param {int} rangeEnd The range end. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.transactionalContentMD5] An optional hash value used to ensure transactional integrity for the page. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the page information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.createPagesFromStream = function (container, blob, readStream, rangeStart, rangeEnd, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createPagesFromStream', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + if ((rangeEnd - rangeStart) + 1 > BlobConstants.MAX_UPDATE_PAGE_SIZE) { + throw new RangeError(SR.INVALID_PAGE_RANGE_FOR_UPDATE); + } + + var self = this; + if (azureutil.objectIsNull(options.transactionalContentMD5) && options.useTransactionalMD5) { + azureutil.calculateMD5(readStream, BlobConstants.MAX_UPDATE_PAGE_SIZE, options, function (internalBuff, contentMD5) { + options.transactionalContentMD5 = contentMD5; + self._createPages(container, blob, internalBuff, null /* stream */, rangeStart, rangeEnd, options, callback); + }); + } else { + self._createPages(container, blob, null /* text */, readStream, rangeStart, rangeEnd, options, callback); + } +}; + +/** +* Lists page ranges. 
Lists all of the page ranges by default, or only the page ranges over a specific range of bytes if rangeStart and rangeEnd are specified. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {int} [options.rangeStart] The range start. +* @param {int} [options.rangeEnd] The range end. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the page ranges information, see `[Range]{@link Range}` for detailed information. 
+* `response` will contain information related to this operation. +*/ +BlobService.prototype.listPageRanges = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('listPageRanges', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var resourceName = createResourceName(container, blob); + var webResource = WebResource.get(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'pagelist') + .withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId); + + if (options.rangeStart && options.rangeStart % BlobConstants.PAGE_SIZE !== 0) { + throw new RangeError(SR.INVALID_PAGE_START_OFFSET); + } + + if (options.rangeEnd && (options.rangeEnd + 1) % BlobConstants.PAGE_SIZE !== 0) { + throw new RangeError(SR.INVALID_PAGE_END_OFFSET); + } + + BlobResult.setHeadersFromBlob(webResource, options); + + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + + var processResponseCallback = function (responseObject, next) { + responseObject.pageRanges = null; + if (!responseObject.error) { + responseObject.pageRanges = []; + + var pageRanges = []; + if (responseObject.response.body.PageList.PageRange) { + pageRanges = responseObject.response.body.PageList.PageRange; + + if (!_.isArray(pageRanges)) { + pageRanges = [pageRanges]; + } + } + + pageRanges.forEach(function (pageRange) { + var range = { + start: parseInt(pageRange.Start, 10), + end: parseInt(pageRange.End, 10) + }; + + responseObject.pageRanges.push(range); + }); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.pageRanges, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, 
options, processResponseCallback); +}; + +/** +* Gets page ranges that have been updated or cleared since the snapshot specified by `previousSnapshotTime` was taken. Gets all of the page ranges by default, or only the page ranges over a specific range of bytes if rangeStart and rangeEnd are specified. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} previousSnapshotTime The previous snapshot time for comparison. Must be prior to `options.snapshotId` if it's provided. +* @param {object} [options] The request options. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {int} [options.rangeStart] The range start. +* @param {int} [options.rangeEnd] The range end. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the page ranges diff information, see `[RangeDiff]{@link RangeDiff}` for detailed information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.getPageRangesDiff = function (container, blob, previousSnapshotTime, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getPageRangesDiff', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var resourceName = createResourceName(container, blob); + var webResource = WebResource.get(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'pagelist') + .withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId) + .withQueryOption(QueryStringConstants.PREV_SNAPSHOT, previousSnapshotTime); + + if (options.rangeStart && options.rangeStart % BlobConstants.PAGE_SIZE !== 0) { + throw new RangeError(SR.INVALID_PAGE_START_OFFSET); + } + + if (options.rangeEnd && (options.rangeEnd + 1) % BlobConstants.PAGE_SIZE !== 0) { + throw new RangeError(SR.INVALID_PAGE_END_OFFSET); + } + + if (options.rangeEnd && (options.rangeEnd + 1) % BlobConstants.PAGE_SIZE !== 0) { + throw new RangeError(SR.INVALID_PAGE_END_OFFSET); + } + + BlobResult.setHeadersFromBlob(webResource, options); + + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + + var processResponseCallback = function (responseObject, next) { + responseObject.pageRangesDiff = null; + if (!responseObject.error) { + 
responseObject.pageRangesDiff = []; + + if (responseObject.response.body.PageList.PageRange) { + var updatedPageRanges = responseObject.response.body.PageList.PageRange; + + if (!_.isArray(updatedPageRanges)) { + updatedPageRanges = [updatedPageRanges]; + } + + updatedPageRanges.forEach(function (pageRange) { + var range = { + start: parseInt(pageRange.Start, 10), + end: parseInt(pageRange.End, 10), + isCleared: false + }; + + responseObject.pageRangesDiff.push(range); + }); + } + + if (responseObject.response.body.PageList.ClearRange) { + var clearedPageRanges = responseObject.response.body.PageList.ClearRange; + + if (!_.isArray(clearedPageRanges)) { + clearedPageRanges = [clearedPageRanges]; + } + + clearedPageRanges.forEach(function (pageRange) { + var range = { + start: parseInt(pageRange.Start, 10), + end: parseInt(pageRange.End, 10), + isCleared: true + }; + + responseObject.pageRangesDiff.push(range); + }); + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.pageRangesDiff, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Clears a range of pages. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {int} rangeStart The range start. +* @param {int} rangeEnd The range end. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. +*/ +BlobService.prototype.clearPageRange = function (container, blob, rangeStart, rangeEnd, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('clearPageRange', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var request = this._updatePageBlobPagesImpl(container, blob, rangeStart, rangeEnd, BlobConstants.PageWriteOptions.CLEAR, options); + + var self = this; + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + self.performRequest(request, null, options, processResponseCallback); +}; + +/** +* Resizes a page blob. 
+* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {String} size The size of the page blob, in bytes. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The blob lease identifier. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the page information. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.resizePageBlob = function (container, blob, size, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('resizePageBlob', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(container, blob); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'properties') + .withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + if (size && size % BlobConstants.PAGE_SIZE !== 0) { + throw new RangeError(SR.INVALID_PAGE_BLOB_LENGTH); + } + + webResource.withHeader(HeaderConstants.BLOB_CONTENT_LENGTH, size); + + this._setBlobPropertiesHelper({ + webResource: webResource, + options: options, + container: container, + blob: blob, + callback: callback + }); + +}; + +/** +* Sets the page blob's sequence number. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {SequenceNumberAction} sequenceNumberAction A value indicating the operation to perform on the sequence number. +* The allowed values are defined in azure.BlobUtilities.SequenceNumberAction. +* @param {string} sequenceNumber The sequence number. The value of the sequence number must be between 0 and 2^63 - 1. +* Set this parameter to null if this operation is an increment action. +* @param {object} [options] The request options. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the page information. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.setPageBlobSequenceNumber = function (container, blob, sequenceNumberAction, sequenceNumber, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setPageBlobSequenceNumber', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + if (sequenceNumberAction === BlobUtilities.SequenceNumberAction.INCREMENT) { + if (!azureutil.objectIsNull(sequenceNumber)) { + throw new ArgumentError('sequenceNumber', SR.BLOB_INVALID_SEQUENCE_NUMBER); + } + } else { + if (azureutil.objectIsNull(sequenceNumber)) { + throw new ArgumentNullError('sequenceNumber', util.format(SR.ARGUMENT_NULL_OR_EMPTY, 'sequenceNumber')); + } + } + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(container, blob); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'properties') + .withHeader(HeaderConstants.SEQUENCE_NUMBER_ACTION, sequenceNumberAction); + + if (sequenceNumberAction !== BlobUtilities.SequenceNumberAction.INCREMENT) { + webResource.withHeader(HeaderConstants.SEQUENCE_NUMBER, sequenceNumber); + } + + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = null; + if (!responseObject.error) { + responseObject.blobResult = new BlobResult(container, blob); + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +// Block blob methods + +/** +* Uploads a block blob from a stream. 
If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param (Stream) stream Stream to the data to store. +* @param {int} streamLength The length of the stream to upload. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects. +* @param {int} [options.blockSize] The size of each block. Maximum is 100MB. +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. 
+* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.createBlockBlobFromStream = function (container, blob, stream, streamLength, optionsOrCallback, callback) { + return this._createBlobFromStream(container, blob, BlobConstants.BlobTypes.BLOCK, stream, streamLength, optionsOrCallback, callback); +}; + +/** +* Uploads a block blob from a text string. 
If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string|object} text The blob text, as a string or in a Buffer. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.createBlockBlobFromText = function (container, blob, text, optionsOrCallback, callback) { + return this._createBlobFromText(container, blob, BlobConstants.BlobTypes.BLOCK, text, optionsOrCallback, callback); +}; + +/** +* Provides a stream to write to a block blob. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* Please note the `Stream` returned by this API should be used with piping. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. 
+* @param {int} [options.blockSize] The size of each block. Maximum is 100MB. +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. +* The default value is false for page blobs and true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {Writable} A Node.js Writable stream. +* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* var stream = fs.createReadStream(fileNameTarget).pipe(blobService.createWriteStreamToBlockBlob(containerName, blobName, { blockIdPrefix: 'block' })); +*/ +BlobService.prototype.createWriteStreamToBlockBlob = function (container, blob, optionsOrCallback, callback) { + return this._createWriteStreamToBlob(container, blob, BlobConstants.BlobTypes.BLOCK, 0, false, optionsOrCallback, callback); +}; + +/** +* Creates a new block to be committed as part of a blob. +* +* @this {BlobService} +* @param {string} blockId The block identifier. +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {Readable} readStream The Node.js Readable stream. 
+* @param {int} streamLength The stream length. +* @param {object} [options] The request options. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype.createBlockFromStream = function (blockId, container, blob, readStream, streamLength, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createBlockFromStream', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.exists(readStream, 'readStream'); + v.value(streamLength, 'streamLength'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + if (streamLength > BlobConstants.MAX_BLOCK_BLOB_BLOCK_SIZE) { + throw new RangeError(SR.INVALID_STREAM_LENGTH); + } else { + this._createBlock(blockId, container, blob, null, readStream, streamLength, options, callback); + } +}; + +/** +* Creates a new block to be committed as part of a blob. +* +* @this {BlobService} +* @param {string} blockId The block identifier. +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string|buffer} content The block content. +* @param {object} [options] The request options. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. +*/ +BlobService.prototype.createBlockFromText = function (blockId, container, blob, content, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createBlockFromText', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var contentLength = (Buffer.isBuffer(content) ? content.length : Buffer.byteLength(content)); + + if (contentLength > BlobConstants.MAX_BLOCK_BLOB_BLOCK_SIZE) { + throw new RangeError(SR.INVALID_TEXT_LENGTH); + } else { + this._createBlock(blockId, container, blob, content, null, contentLength, options, callback); + } +}; + +/** +* Creates a new block to be committed as part of a blob from an URL of an Azure blob or file. +* +* @this {BlobService} +* @param {string} blockId The block identifier. 
+* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} sourceURL The URL of the source data. +* It can point to any Azure Blob or File, that is either public or has a shared access signature attached. +* @param {int} sourceRangeStart The start of the range of bytes(inclusive) that has to be taken from the copy source. +* @param {int} sourceRangeEnd The end of the range of bytes(inclusive) that has to be taken from the copy source. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. +*/ +BlobService.prototype.createBlockFromURL = function (blockId, container, blob, sourceURL, sourceRangeStart, sourceRangeEnd, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createBlockFromURL', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.string(sourceURL, 'sourceURL'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(container, blob); + + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'block') + .withQueryOption(QueryStringConstants.BLOCK_ID, Buffer.from(blockId).toString('base64')) + .withHeader(HeaderConstants.COPY_SOURCE, sourceURL); + + options.sourceRangeStart = sourceRangeStart; + options.sourceRangeEnd = sourceRangeEnd; + BlobResult.setHeadersFromBlob(webResource, options); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Creates a new block to be committed as part of a block blob. +* @ignore +* +* @this {BlobService} +* @param {string} blockId The block identifier. +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string|buffer} content The block content. +* @param {Stream} stream The stream to the data to store. +* @param {int} length The length of the stream or text to upload. +* @param {object} [options] The request options. 
+* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype._createBlock = function (blockId, container, blob, content, stream, length, options, callback) { + var resourceName = createResourceName(container, blob); + + var self = this; + var startCreateBlock = function () { + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'block') + .withQueryOption(QueryStringConstants.BLOCK_ID, Buffer.from(blockId).toString('base64')) + .withHeader(HeaderConstants.CONTENT_LENGTH, length); + + BlobResult.setHeadersFromBlob(webResource, options); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + if (!azureutil.objectIsNull(content)) { + self.performRequest(webResource, content, options, processResponseCallback); + } else { + self.performRequestOutputStream(webResource, stream, options, processResponseCallback); + } + }; + + if (azureutil.objectIsNull(options.transactionalContentMD5) && options.useTransactionalMD5) { + if (!azureutil.objectIsNull(content)) { + options.transactionalContentMD5 = azureutil.getContentMd5(content); + startCreateBlock(); + } else { + azureutil.calculateMD5(stream, length, options, function (internalBuff, contentMD5) { + options.transactionalContentMD5 = contentMD5; + content = internalBuff; + length = internalBuff.length; + startCreateBlock(); + }); + } + } else { + startCreateBlock(); + } +}; + +/** +* Writes a blob by specifying the list of block IDs that make up the blob. +* In order to be written as part of a blob, a block must have been successfully written to the server in a prior +* createBlock operation. +* Note: If no valid list is specified in the blockList parameter, blob would be updated with empty content, +* i.e. existing blocks in the blob will be removed, this behavior is kept for backward compatibility consideration. 
+* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} blockList The wrapper for block ID list contains block IDs that make up the blob. +* Three kinds of list are provided, please choose one to use according to requirement. +* For more background knowledge, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-list +* @param {string[]} [blockList.LatestBlocks] The list contains block IDs that make up the blob sequentially. +* All the block IDs in this list will be specified within Latest element. +* Choose this list to contain block IDs indicates that the Blob service should first search +* the uncommitted block list, and then the committed block list for the named block. +* @param {string[]} [blockList.CommittedBlocks] The list contains block IDs that make up the blob sequentially. +* All the block IDs in this list will be specified within Committed element. +* Choose this list to contain block IDs indicates that the Blob service should only search +* the committed block list for the named block. +* @param {string[]} [blockList.UncommittedBlocks] The list contains block IDs that make up the blob sequentially. +* All the block IDs in this list will be specified within Uncommitted element. +* Choose this list to contain block IDs indicates that the Blob service should only search +* the uncommitted block list for the named block. +* @param {object} [options] The request options. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. 
+* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. 
+* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* blobService.createBlockFromText("sampleBlockName", containerName, blobName, "sampleBlockContent", function(error) { +* assert.equal(error, null); +* // In this example, LatestBlocks is used, we hope the Blob service first search +* // the uncommitted block list, and then the committed block list for the named block "sampleBlockName", +* // and thus make sure the block is with latest content. +* blobService.commitBlocks(containerName, blobName, { LatestBlocks: ["sampleBlockName"] }, function(error) { +* assert.equal(error, null); +* }); +* }); +* + */ +BlobService.prototype.commitBlocks = function (container, blob, blockList, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('commitBlocks', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.object(blockList, 'blockList'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var blockListXml = BlockListResult.serialize(blockList); + + var resourceName = createResourceName(container, blob); + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'blocklist') + .withHeader(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(blockListXml)) + .withBody(blockListXml); + + BlobResult.setPropertiesFromBlob(webResource, options); + + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = new BlobResult(container, blob); + responseObject.blobResult.list = null; + if (!responseObject.error) { + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + responseObject.blobResult.list = blockList; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, 
returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, webResource.body, options, processResponseCallback); +}; + +/** +* Retrieves the list of blocks that have been uploaded as part of a block blob. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {BlockListFilter} blocklisttype The type of block list to retrieve. +* @param {object} [options] The request options. +* @param {string} [options.snapshotId] The source blob snapshot identifier. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the blocklist information. 
+* `response` will contain information related to this operation. +*/ +BlobService.prototype.listBlocks = function (container, blob, blocklisttype, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('listBlocks', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var resourceName = createResourceName(container, blob); + var options = extend(true, {}, userOptions); + var webResource = WebResource.get(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'blocklist') + .withQueryOption(QueryStringConstants.BLOCK_LIST_TYPE, blocklisttype) + .withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId); + + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + + var processResponseCallback = function (responseObject, next) { + responseObject.blockListResult = null; + if (!responseObject.error) { + responseObject.blockListResult = BlockListResult.parse(responseObject.response.body.BlockList); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.blockListResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Generate a random block id prefix +*/ +BlobService.prototype.generateBlockIdPrefix = function () { + var prefix = Math.floor(Math.random() * 0x100000000).toString(16); + return azureutil.zeroPaddingString(prefix, 8); +}; + +/** +* Get a block id according to prefix and block number +*/ +BlobService.prototype.getBlockId = function (prefix, number) { + return prefix + '-' + azureutil.zeroPaddingString(number, 6); +}; + +// Append blob methods + +/** +* Creates an empty append blob. If the blob already exists on the service, it will be overwritten. 
+* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. +*/ +BlobService.prototype.createOrReplaceAppendBlob = function (container, blob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createOrReplaceAppendBlob', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var resourceName = createResourceName(container, blob); + + var webResource = WebResource.put(resourceName) + .withHeader(HeaderConstants.BLOB_TYPE, BlobConstants.BlobTypes.APPEND) + .withHeader(HeaderConstants.LEASE_ID, options.leaseId) + .withHeader(HeaderConstants.CONTENT_LENGTH, 0); + + BlobResult.setHeadersFromBlob(webResource, options); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Uploads an append blob from a stream. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. 
+* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* If you want to append data to an already existing blob, please look at appendFromStream. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param (Stream) stream Stream to the data to store. +* @param {int} streamLength The length of the stream to upload. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects. +* @param {string} [options.leaseId] The lease identifier. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. 
+* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.createAppendBlobFromStream = function (container, blob, stream, streamLength, optionsOrCallback, callback) { + return this._createBlobFromStream(container, blob, BlobConstants.BlobTypes.APPEND, stream, streamLength, optionsOrCallback, callback); +}; + +/** +* Uploads an append blob from a text string. If the blob already exists on the service, it will be overwritten. 
+* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* If you want to append data to an already existing blob, please look at appendFromText. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string|object} text The blob text, as a string or in a Buffer. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {string} [options.leaseId] The lease identifier. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. 
+* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.createAppendBlobFromText = function (container, blob, text, optionsOrCallback, callback) { + return this._createBlobFromText(container, blob, BlobConstants.BlobTypes.APPEND, text, optionsOrCallback, callback); +}; + +/** +* Provides a stream to write to a new append blob. If the blob already exists on the service, it will be overwritten. 
+* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* Please note the `Stream` returned by this API should be used with piping. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {string} [options.leaseId] The lease identifier. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. +* The default value is false for page blobs and true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. 
+* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback The callback function. +* @return {Writable} A Node.js Writable stream. +* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* var stream = fs.createReadStream(fileNameTarget).pipe(blobService.createWriteStreamToAppendBlob(containerName, blobName)); +*/ +BlobService.prototype.createWriteStreamToNewAppendBlob = function (container, blob, optionsOrCallback, callback) { + return this._createWriteStreamToBlob(container, blob, BlobConstants.BlobTypes.APPEND, 0, true, optionsOrCallback, callback); +}; + +/** +* Provides a stream to write to an existing append blob. Assumes that the blob exists. 
+* If it does not, please create the blob using createAppendBlob before calling this method or use createWriteStreamToNewAppendBlob. +* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* Please note the `Stream` returned by this API should be used with piping. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {string} [options.leaseId] The lease identifier. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. +* The default value is false for page blobs and true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. 
+* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback The callback function. +* @return {Writable} A Node.js Writable stream. +* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* var stream = fs.createReadStream(fileNameTarget).pipe(blobService.createWriteStreamToAppendBlob(containerName, blobName)); +*/ +BlobService.prototype.createWriteStreamToExistingAppendBlob = function (container, blob, optionsOrCallback, callback) { + return this._createWriteStreamToBlob(container, blob, BlobConstants.BlobTypes.APPEND, 0, false, optionsOrCallback, callback); +}; + +/** +* Appends to an append blob from a stream. Assumes the blob already exists on the service. 
+* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param (Stream) stream Stream to the data to store. +* @param {int} streamLength The length of the stream to upload. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects. +* @param {string} [options.leaseId] The lease identifier. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.appendFromStream = function (container, blob, stream, streamLength, optionsOrCallback, callback) { + var options; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; }); + + validate.validateArgs('appendFromStream', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.exists(stream, 'stream'); + v.value(streamLength, 'streamLength'); + v.callback(callback); + }); + + return this._uploadBlobFromStream(false, container, blob, BlobConstants.BlobTypes.APPEND, stream, streamLength, options, callback); +}; + +/** +* Appends to an append blob from a text string. Assumes the blob already exists on the service. 
+* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string|object} text The blob text, as a string or in a Buffer. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {string} [options.leaseId] The lease identifier. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.appendFromText = function (container, blob, text, optionsOrCallback, callback) { + return this._uploadBlobFromText(false, container, blob, BlobConstants.BlobTypes.APPEND, text, optionsOrCallback, callback); +}; + + +/** +* Creates a new block from a read stream to be appended to an append blob. +* If the sequence of data to be appended is important, please use this API strictly in a single writer. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. 
+* If the sequence of data to be appended is not important, this API can be used in parallel, +* in this case, options.appendPosition can be left without settings. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {Readable} readStream The Node.js Readable stream. +* @param {int} streamLength The stream length. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {int} [options.maxBlobSize] The max length in bytes allowed for the append blob to grow to. +* @param {int} [options.appendPosition] The number indicating the byte offset to check for. The append will succeed only if the end position of the blob is equal to this number. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype.appendBlockFromStream = function (container, blob, readStream, streamLength, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('appendBlockFromStream', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.exists(readStream, 'readStream'); + v.value(streamLength, 'streamLength'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + if (streamLength > BlobConstants.MAX_APPEND_BLOB_BLOCK_SIZE) { + throw new RangeError(SR.INVALID_STREAM_LENGTH); + } else { + this._appendBlock(container, blob, null, readStream, streamLength, options, callback); + } +}; + +/** +* Creates a new block from a text to be appended to an append blob. +* If the sequence of data to be appended is important, please use this API strictly in a single writer. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* If the sequence of data to be appended is not important, this API can be used in parallel, +* in this case, options.appendPosition can be left without settings. +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. 
+* @param {string|object} content The block text, as a string or in a Buffer. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {int} [options.maxBlobSize] The max length in bytes allowed for the append blob to grow to. +* @param {int} [options.appendPosition] The number indicating the byte offset to check for. The append will succeed only if the end position of the blob is equal to this number. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the block content. This hash is used to verify the integrity of the block during transport. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. +*/ +BlobService.prototype.appendBlockFromText = function (container, blob, content, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('appendBlockFromText', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var contentLength = (Buffer.isBuffer(content) ? content.length : Buffer.byteLength(content)); + if (contentLength > BlobConstants.MAX_APPEND_BLOB_BLOCK_SIZE) { + throw new RangeError(SR.INVALID_TEXT_LENGTH); + } else { + this._appendBlock(container, blob, content, null, contentLength, options, callback); + } +}; + +// Private methods + +/** +* Creates a new blob from a stream. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {BlobType} blobType The blob type. +* @param (Stream) stream Stream to the data to store. +* @param {int} streamLength The length of the stream to upload. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The upload tracker objects. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. (For append blob only) +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. (For block blob only) +* @param {string} [options.leaseId] The lease identifier. 
+* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback The callback function. +* @return {SpeedSummary} +*/ +BlobService.prototype._createBlobFromStream = function (container, blob, blobType, stream, streamLength, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('_createBlobFromStream', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.blobTypeIsValid(blobType); + v.exists(stream, 'stream'); + v.value(streamLength, 'streamLength'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var self = this; + var creationCallback = function (createError, createBlob, createResponse) { + if (createError) { + callback(createError, createBlob, createResponse); + } else { + self._uploadBlobFromStream(true, container, blob, blobType, stream, streamLength, options, callback); + } + }; + + this._createBlob(container, blob, blobType, streamLength, options, creationCallback); + + return options.speedSummary; +}; + +/** +* Uploads a block blob or an append blob from a text string. 
If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {BlobType} blobType The blob type. +* @param {string|buffer} content The blob text, as a string or in a Buffer. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. (For append blob only) +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. 
+* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* information about the blob. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype._createBlobFromText = function (container, blob, blobType, content, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('_createBlobFromText', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.blobTypeIsValid(blobType); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var self = this; + var creationCallback = function (createError, createBlob, createResponse) { + if (createError) { + callback(createError, createBlob, createResponse); + } else { + self._uploadBlobFromText(true, container, blob, blobType, content, options, callback); + } + }; + + var contentLength = azureutil.objectIsNull(content) ? 0 : ((Buffer.isBuffer(content) ? content.length : Buffer.byteLength(content))); + this._createBlob(container, blob, blobType, contentLength, options, creationCallback); + + return options.speedSummary; +}; + +/** +* Provides a stream to write to a block blob or an append blob. +* +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {BlobType} blobType The blob type. +* @param {int} length The blob length. +* @param {bool} createNewBlob Specifies whether create a new blob. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. (For append blob only) +* @param {string} [options.blockSize] The size of each block. Maximum is 100MB. (For block blob only) +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. (For block blob only) +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. 
This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. +* The default value is false for page blobs and true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback The callback function. +* @return {Writable} A Node.js Writable stream. +*/ +BlobService.prototype._createWriteStreamToBlob = function (container, blob, blobType, length, createNewBlob, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('_createWriteStreamToBlob', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.blobTypeIsValid(blobType); + }); + + var options = extend(true, {}, userOptions); + + var sizeLimitation; + if (blobType === BlobConstants.BlobTypes.BLOCK) { + // default to true, unless explicitly set to false + options.storeBlobContentMD5 = options.storeBlobContentMD5 === false ? 
false : true; + sizeLimitation = options.blockSize || BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES; + } else if (blobType == BlobConstants.BlobTypes.PAGE) { + sizeLimitation = BlobConstants.DEFAULT_WRITE_PAGE_SIZE_IN_BYTES; + } else if (blobType == BlobConstants.BlobTypes.APPEND) { + sizeLimitation = BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES; + } + + var stream = new ChunkStream({ calcContentMd5: options.storeBlobContentMD5 }); + stream._highWaterMark = sizeLimitation; + + stream.pause(); //Immediately pause the stream in order to wait for the destination to getting ready + + var self = this; + var createCallback = function (createError, createBlob, createResponse) { + if (createError) { + if (callback) { + callback(createError, createBlob, createResponse); + } + } else { + self._uploadBlobFromStream(createNewBlob, container, blob, blobType, stream, null, options, function (error, blob, response) { + if (error) { + stream.emit('error', error); + } + + if (callback) { + callback(error, blob, response); + } + }); + } + }; + + if (createNewBlob === true) { + this._createBlob(container, blob, blobType, length, options, createCallback); + } else { + createCallback(); + } + + return stream; +}; + +/** +* Upload blob content from a stream. Assumes the blob already exists. +* +* @ignore +* +* @this {BlobService} +* @param {bool} isNewBlob Specifies whether the blob is newly created. +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {BlobType} blobType The blob type. +* @param (Stream) stream Stream to the data to store. +* @param {int} streamLength The length of the stream to upload. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The upload tracker objects. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. 
(For append blob only) +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. (For block blob only) +* @param {int} [options.blockSize] The size of each block. Maximum is 100MB. (For block blob only) +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback The callback function. +* @return {SpeedSummary} +*/ +BlobService.prototype._uploadBlobFromStream = function (isNewBlob, container, blob, blobType, stream, streamLength, optionsOrCallback, callback) { + var options; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; }); + options.speedSummary = options.speedSummary || new SpeedSummary(blob); + + if (blobType === BlobConstants.BlobTypes.BLOCK) { + // default to true, unless explicitly set to false + options.storeBlobContentMD5 = options.storeBlobContentMD5 === false ? 
false : true; + } + + stream.pause(); + + var self = this; + var startUpload = function () { + var putBlockBlobFromStream = function () { + if (streamLength > 0 && azureutil.objectIsNull(azureutil.tryGetValueChain(options, ['contentSettings', 'contentMD5'], null)) && options.storeBlobContentMD5) { + azureutil.calculateMD5(stream, Math.min(self.singleBlobPutThresholdInBytes, streamLength), options, function (internalBuff, contentMD5) { + azureutil.setObjectInnerPropertyValue(options, ['contentSettings', 'contentMD5'], contentMD5); + self._putBlockBlob(container, blob, internalBuff, null, internalBuff.length, options, callback); + }); + stream.resume(); + } else { + // Stream will resume when it has a pipe destination or a 'data' listener + self._putBlockBlob(container, blob, null, stream, streamLength, options, callback); + } + }; + + if (streamLength === null || streamLength >= self.singleBlobPutThresholdInBytes || blobType !== BlobConstants.BlobTypes.BLOCK) { + var chunkStream = new ChunkStreamWithStream(stream, { calcContentMd5: options.storeBlobContentMD5 }); + self._uploadContentFromChunkStream(container, blob, blobType, chunkStream, streamLength, options, callback); + } else { + putBlockBlobFromStream(); + } + }; + + if (!isNewBlob) { + if (options.storeBlobContentMD5 && blobType !== BlobConstants.BlobTypes.BLOCK) { + throw new Error(SR.MD5_NOT_POSSIBLE); + } + + if (blobType === BlobConstants.BlobTypes.APPEND || options.accessConditions) { + // Do a getBlobProperties right at the beginning for existing blobs and use the user passed in access conditions. + // So any pre-condition failure on the first block (in a strictly single writer scenario) is caught. + // This call also helps us get the append position to append to if the user hasn’t specified an access condition. 
+ this.getBlobProperties(container, blob, options, function (error, properties, response) { + if (error && !(options.accessConditions && options.accessConditions.EtagNonMatch === '*' && response.statusCode === 400)) { + callback(error); + } else { + if (blobType === BlobConstants.BlobTypes.APPEND) { + options.appendPosition = properties.contentLength; + } + + startUpload(); + } + }); + } else { + startUpload(); + } + } else { + startUpload(); + } + + return options.speedSummary; +}; + +/** +* Upload blob content from a text. Assumes the blob already exists. +* +* @ignore +* +* @this {BlobService} +* @param {bool} isNewBlob Specifies whether the blob is newly created. +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {BlobType} blobType The blob type. +* @param (string) content The blob text, as a string or in a Buffer. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The upload tracker objects. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. (For append blob only) +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. (For block blob only) +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. 
+* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback The callback function. 
+* @return {SpeedSummary} +*/ +BlobService.prototype._uploadBlobFromText = function (isNewBlob, container, blob, blobType, content, optionsOrCallback, callback) { + var options; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; }); + options.speedSummary = options.speedSummary || new SpeedSummary(blob); + options[HeaderConstants.CONTENT_TYPE] = (options.contentSettings && options.contentSettings.contentType) || 'text/plain;charset="utf-8"'; + + var self = this; + var startUpload = function () { + var operationFunc; + var length = azureutil.objectIsNull(content) ? 0 : (Buffer.isBuffer(content) ? content.length : Buffer.byteLength(content)); + + if (blobType === BlobConstants.BlobTypes.BLOCK) { + // default to true, unless explicitly set to false + options.storeBlobContentMD5 = options.storeBlobContentMD5 === false ? false : true; + operationFunc = self._putBlockBlob; + + if (length > BlobConstants.MAX_SINGLE_UPLOAD_BLOB_SIZE_IN_BYTES) { + throw new RangeError(SR.INVALID_BLOB_LENGTH); + } + } else if (blobType === BlobConstants.BlobTypes.APPEND) { + operationFunc = self._appendBlock; + + if (length > BlobConstants.MAX_APPEND_BLOB_BLOCK_SIZE) { + throw new RangeError(SR.INVALID_TEXT_LENGTH); + } + } + + var finalCallback = function (error, blobResult, response) { + if (blobType !== BlobConstants.BlobTypes.BLOCK) { + self.setBlobProperties(container, blob, options.contentSettings, options, function (error, blob, response) { + blob = extend(false, blob, blobResult); + callback(error, blob, response); + }); + } else { + callback(error, blobResult, response); + } + }; + + operationFunc.call(self, container, blob, content, null, length, options, finalCallback); + }; + + if (!isNewBlob) { + if (options.storeBlobContentMD5 && blobType !== BlobConstants.BlobTypes.BLOCK) { + throw new Error(SR.MD5_NOT_POSSIBLE); + } + + if (blobType === BlobConstants.BlobTypes.APPEND || options.accessConditions) { + // Do a getBlobProperties 
right at the beginning for existing blobs and use the user passed in access conditions. + // So any pre-condition failure on the first block (in a strictly single writer scenario) is caught. + // This call also helps us get the append position to append to if the user hasn’t specified an access condition. + this.getBlobProperties(container, blob, options, function (error, properties) { + if (error) { + callback(error); + } else { + if (blobType === BlobConstants.BlobTypes.APPEND) { + options.appendPosition = properties.contentLength; + } + + startUpload(); + } + }); + } + } else { + if (!azureutil.objectIsNull(content) && azureutil.objectIsNull(azureutil.tryGetValueChain(options, ['contentSettings', 'contentMD5'], null)) && options.storeBlobContentMD5) { + azureutil.setObjectInnerPropertyValue(options, ['contentSettings', 'contentMD5'], azureutil.getContentMd5(content)); + } + startUpload(); + } +}; + +/** +* Uploads a block blob from a stream. +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} text The blob text. +* @param (Stream) stream Stream to the data to store. +* @param {int} length The length of the stream or text to upload. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. 
+* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* information about the blob. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype._putBlockBlob = function (container, blob, text, stream, length, options, callback) { + if (!options.speedSummary) { + options.speedSummary = new SpeedSummary(blob); + } + + var speedSummary = options.speedSummary; + speedSummary.totalSize = length; + + var resourceName = createResourceName(container, blob); + var webResource = WebResource.put(resourceName) + .withHeader(HeaderConstants.CONTENT_TYPE, 'application/octet-stream') + .withHeader(HeaderConstants.BLOB_TYPE, BlobConstants.BlobTypes.BLOCK) + .withHeader(HeaderConstants.CONTENT_LENGTH, length); + + if (!azureutil.objectIsNull(text) && azureutil.objectIsNull(options.transactionalContentMD5) && options.useTransactionalMD5) { + options.transactionalContentMD5 = azureutil.getContentMd5(text); + } + + BlobResult.setHeadersFromBlob(webResource, options); + + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = null; + if (!responseObject.error) { + responseObject.blobResult = new BlobResult(container, blob); + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + if (options.metadata) { + responseObject.blobResult.metadata = options.metadata; + } + } + + var finalCallback = function (returnObject) { + if (!returnObject || !returnObject.error) { + speedSummary.increment(length); + } + callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + if (!azureutil.objectIsNull(text)) { + this.performRequest(webResource, text, options, processResponseCallback); + } else { + this.performRequestOutputStream(webResource, stream, options, processResponseCallback); + } + + return options.speedSummary; +}; + +/** +* Appends a new block to an append blob. +* +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string|buffer} content The block content. 
+* @param (Stream) stream The stream to the data to store. +* @param {int} length The length of the stream or content to upload. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {int} [options.maxBlobSize] The max length in bytes allowed for the append blob to grow to. +* @param {int} [options.appendPosition] The number indicating the byte offset to check for. The append will succeed only if the end position of the blob is equal to this number. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.transactionalContentMD5] The blob’s MD5 hash. This hash is used to verify the integrity of the blob during transport. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. +*/ +BlobService.prototype._appendBlock = function (container, blob, content, stream, length, options, callback) { + var speedSummary = options.speedSummary || new SpeedSummary(blob); + speedSummary.totalSize = length; + + var self = this; + var startAppendBlock = function () { + var resourceName = createResourceName(container, blob); + + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'appendblock') + .withHeader(HeaderConstants.CONTENT_LENGTH, length) + .withHeader(HeaderConstants.BLOB_CONDITION_MAX_SIZE, options.maxBlobSize) + .withHeader(HeaderConstants.BLOB_CONDITION_APPEND_POSITION, options.appendPosition); + + BlobResult.setHeadersFromBlob(webResource, options); + + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = null; + if (!responseObject.error) { + responseObject.blobResult = new BlobResult(container, blob); + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + if (!returnObject || !returnObject.error) { + speedSummary.increment(length); + } + callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + if (!azureutil.objectIsNull(content)) { + self.performRequest(webResource, content, options, processResponseCallback); + } else { + self.performRequestOutputStream(webResource, stream, options, processResponseCallback); + } + }; + + if (azureutil.objectIsNull(options.transactionalContentMD5) && options.useTransactionalMD5) { + if (!azureutil.objectIsNull(content)) { + 
options.transactionalContentMD5 = azureutil.getContentMd5(content); + startAppendBlock(); + } else { + azureutil.calculateMD5(stream, length, options, function (internalBuff, contentMD5) { + options.transactionalContentMD5 = contentMD5; + content = internalBuff; + length = internalBuff.length; + startAppendBlock(); + }); + } + } else { + startAppendBlock(); + } + + return options.speedSummary; +}; + +/** +* Creates and dispatches lease requests. +* @ignore +* +* @this {BlobService} +* @param {object} webResource The web resource. +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} leaseId The lease identifier. Required to renew, change or release the lease. +* @param {string} leaseAction The lease action (BlobConstants.LeaseOperation.*). Required. +* @param {object} userOptions The request options. +* @param {int} [userOptions.leaseBreakPeriod] The lease break period. +* @param {string} [userOptions.leaseDuration] The lease duration. Default is never to expire. +* @param {string} [userOptions.proposedLeaseId] The proposed lease identifier. This is required for the CHANGE lease action. +* @param {LocationMode} [userOptions.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {int} [userOptions.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [userOptions.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, lease, response)} callback `error` will contain information +* if an error occurs; otherwise `lease` will contain +* the lease information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype._leaseImpl = function (container, blob, leaseId, leaseAction, options, callback) { + var webResource; + if (!azureutil.objectIsNull(blob)) { + validate.validateArgs('_leaseImpl', function (v) { + v.string(blob, 'blob'); + }); + var resourceName = createResourceName(container, blob); + webResource = WebResource.put(resourceName); + } else { + webResource = WebResource.put(container) + .withQueryOption(QueryStringConstants.RESTYPE, 'container'); + } + + webResource.withQueryOption(QueryStringConstants.COMP, 'lease') + .withHeader(HeaderConstants.LEASE_ID, leaseId) + .withHeader(HeaderConstants.LEASE_ACTION, leaseAction.toLowerCase()) + .withHeader(HeaderConstants.LEASE_BREAK_PERIOD, options.leaseBreakPeriod) + .withHeader(HeaderConstants.PROPOSED_LEASE_ID, options.proposedLeaseId) + .withHeader(HeaderConstants.LEASE_DURATION, options.leaseDuration); + + var processResponseCallback = function (responseObject, next) { + responseObject.leaseResult = null; + if (!responseObject.error) { + responseObject.leaseResult = new LeaseResult(container, blob); + responseObject.leaseResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.leaseResult, returnObject.response); + }; + + next(responseObject, finalCallback); + 
}; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Updates a page blob from text. +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} text The text string. +* @param {Readable} readStream The Node.js Readable stream. +* @param {int} rangeStart The range start. +* @param {int} rangeEnd The range end. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The target blob lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the page content. This hash is used to verify the integrity of the page during transport. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {Function(error, pageBlob, response)} callback `error` will contain information +* if an error occurs; otherwise `pageBlob` will contain +* the blob information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype._createPages = function (container, blob, text, readStream, rangeStart, rangeEnd, options, callback) { + var request = this._updatePageBlobPagesImpl(container, blob, rangeStart, rangeEnd, BlobConstants.PageWriteOptions.UPDATE, options); + + // At this point, we have already validated that the range is less than 4MB. Therefore, we just need to calculate the contentMD5 if required. + // Even when this is called from the createPagesFromStream method, it is pre-buffered and called with text. + if (!azureutil.objectIsNull(text) && azureutil.objectIsNull(options.transactionalContentMD5) && options.useTransactionalMD5) { + request.withHeader(HeaderConstants.CONTENT_MD5, azureutil.getContentMd5(text)); + } + + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = null; + if (!responseObject.error) { + responseObject.blobResult = new BlobResult(container, blob); + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + if (!azureutil.objectIsNull(text)) { + this.performRequest(request, text, options, processResponseCallback); + } else { + this.performRequestOutputStream(request, readStream, options, processResponseCallback); + } +}; + +/** +* @ignore +*/ +BlobService.prototype._updatePageBlobPagesImpl = function (container, blob, rangeStart, rangeEnd, writeMethod, options) { + if (rangeStart && rangeStart % BlobConstants.PAGE_SIZE !== 0) { + throw new RangeError(SR.INVALID_PAGE_START_OFFSET); + } + + if (rangeEnd && (rangeEnd + 1) % BlobConstants.PAGE_SIZE !== 
0) { + throw new RangeError(SR.INVALID_PAGE_END_OFFSET); + } + + // this is necessary if this is called from _uploadContentFromChunkStream->_createPages + if (!options) { + options = {}; + } + + options.rangeStart = rangeStart; + options.rangeEnd = rangeEnd; + + options.contentLength = writeMethod === BlobConstants.PageWriteOptions.UPDATE ? (rangeEnd - rangeStart) + 1 : 0; + + var resourceName = createResourceName(container, blob); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'page') + .withHeader(HeaderConstants.CONTENT_TYPE, 'application/octet-stream') + .withHeader(HeaderConstants.PAGE_WRITE, writeMethod); + + BlobResult.setHeadersFromBlob(webResource, options); + + return webResource; +}; + +/** +* Uploads blob content from a stream. +* For block blob, it creates a new block to be committed. +* For page blob, it writes a range of pages. +* For append blob, it appends a new block. +* +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} blobType The blob type. +* @param (Stream) stream Stream to the data to store. +* @param {int} streamLength The length of the stream to upload. +* @param {object|function} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects; +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. (For append blob only) +* @param {int} [options.maxBlobSize] The max length in bytes allowed for the append blob to grow to. +* @param {int} [options.appendPosition] The number indicating the byte offset to check for. The append will succeed only if the end position of the blob is equal to this number. 
+* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. (For block blob only) +* @param {int} [options.blockSize] The size of each block. Maximum is 100MB. (For block blob only) +* @param {string} [options.leaseId] The lease identifier. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {function(error, null)} callback The callback function. +* @return {SpeedSummary} +*/ + +BlobService.prototype._uploadContentFromChunkStream = function (container, blob, blobType, chunkStream, streamLength, options, callback) { + this.logger.debug(util.format('_uploadContentFromChunkStream for blob %s', blob)); + + var apiName; + var isBlockBlobUpload; + var isPageBlobUpload; + var isAppendBlobUpload; + var sizeLimitation; + var originalContentMD5 = azureutil.tryGetValueChain(options, ['contentSettings', 'contentMD5'], null); + var parallelOperationThreadCount = options.parallelOperationThreadCount || this.parallelOperationThreadCount; + + if (blobType == BlobConstants.BlobTypes.BLOCK) { + apiName = 'createBlockFromText'; + isBlockBlobUpload = true; + + // BlockBlob can only have 50000 blocks in maximum + var minBlockSize = Math.ceil(streamLength / 50000); + if (options.blockSize) { + if (options.blockSize < minBlockSize) { + // options.blockSize is less than the minBlockSize, error callback + var error = new ArgumentError('options.blockSize', util.format('The minimum blockSize is %s and the provided blockSize %s is too small.', minBlockSize, options.blockSize)); + callback(error); + return; + } else { + sizeLimitation = options.blockSize; + } + } else { + // 4MB minimum for auto-calculated block size + sizeLimitation = Math.max(minBlockSize, BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES); + } + } else if (blobType == BlobConstants.BlobTypes.PAGE) { + 
apiName = '_createPages'; + isPageBlobUpload = true; + sizeLimitation = BlobConstants.DEFAULT_WRITE_PAGE_SIZE_IN_BYTES; + } else if (blobType == BlobConstants.BlobTypes.APPEND) { + apiName = 'appendBlockFromText'; + isAppendBlobUpload = true; + parallelOperationThreadCount = 1; + sizeLimitation = BlobConstants.DEFAULT_WRITE_BLOCK_SIZE_IN_BYTES; + } else { + var error = new ArgumentError('blobType', util.format('Unknown blob type %s', blobType)); + callback(error); + return; + } + + chunkStream._highWaterMark = sizeLimitation; + + this._setOperationExpiryTime(options); + + // initialize the speed summary + var speedSummary = options.speedSummary || new SpeedSummary(blob); + speedSummary.totalSize = streamLength; + + // initialize chunk allocator + var allocator = new ChunkAllocator(sizeLimitation, parallelOperationThreadCount, { logger: this.logger }); + chunkStream.setMemoryAllocator(allocator); + chunkStream.setOutputLength(streamLength); + + // if this is a FileReadStream, set the allocator on that stream + if (chunkStream._stream && chunkStream._stream.setMemoryAllocator) { + var fileReadStreamAllocator = new ChunkAllocator(chunkStream._stream._highWaterMark, parallelOperationThreadCount, { logger: this.logger }); + chunkStream._stream.setMemoryAllocator(fileReadStreamAllocator); + } + + // initialize batch operations + var batchOperations = new BatchOperation(apiName, { + callInOrder: isAppendBlobUpload, + callbackInOrder: isAppendBlobUpload, + logger: this.logger, + enableReuseSocket: this.defaultEnableReuseSocket, + operationMemoryUsage: sizeLimitation + }); + batchOperations.setConcurrency(parallelOperationThreadCount); + + // initialize options + var rangeOptions = { + leaseId: options.leaseId, + timeoutIntervalInMs: options.timeoutIntervalInMs, + clientRequestTimeoutInMs: options.clientRequestTimeoutInMs, + operationExpiryTime: options.operationExpiryTime, + maxBlobSize: options.maxBlobSize, + appendPosition: options.appendPosition || 0, + 
initialAppendPosition: options.appendPosition || 0, + absorbConditionalErrorsOnRetry: options.absorbConditionalErrorsOnRetry + }; + + // initialize block blob variables + var blockIdPrefix = options.blockIdPrefix || this.generateBlockIdPrefix(); + var blockCount = 0; + var blockIds = []; + var blobResult = {}; + + var self = this; + chunkStream.on('data', function (data, range) { + var operation = null; + var full = false; + var autoIncrement = speedSummary.getAutoIncrementFunction(data.length); + + if (data.length > sizeLimitation) { + throw new RangeError(util.format(SR.EXCEEDED_SIZE_LIMITATION, sizeLimitation, data.length)); + } + + if (options.useTransactionalMD5) { + //calculate content md5 for the current uploading block data + var contentMD5 = azureutil.getContentMd5(data); + rangeOptions.transactionalContentMD5 = contentMD5; + } + + var checkLengthLimit = function () { + if (!streamLength) return true; + if (range.start >= streamLength) { + self.logger.debug(util.format('Stop uploading data from %s bytes to %s bytes to blob %s because of limit %s', range.start, range.end, blob, streamLength)); + chunkStream.stop(); + return false; + } else if (range.end >= streamLength) { + self.logger.debug(util.format('Clip uploading data from %s bytes to %s bytes to blob %s because of limit %s', range.start, range.end, blob, streamLength)); + range.end = streamLength - 1; + data = data.slice(0, streamLength - range.start); + if (options.useTransactionalMD5) { + rangeOptions.transactionalContentMD5 = azureutil.getContentMd5(data); + } + } + return true; + }; + + var uploadBlockBlobChunk = function () { + if (!checkLengthLimit()) return; + var blockId = self.getBlockId(blockIdPrefix, blockCount); + blockIds.push(blockId); + + operation = new BatchOperation.RestOperation(self, apiName, blockId, container, blob, data, rangeOptions, function (error) { + if (!error) { + autoIncrement(); + } else { + self.logger.debug(util.format('Stop uploading data as error happens. 
Error: %s', util.inspect(error))); + chunkStream.stop(); + } + allocator.releaseBuffer(data); + data = null; + }); + + blockCount++; + }; + + var uploadPageBlobChunk = function () { + if (!checkLengthLimit()) return; + + if (azureutil.isBufferAllZero(data)) { + self.logger.debug(util.format('Skip upload data from %s bytes to %s bytes to blob %s', range.start, range.end, blob)); + speedSummary.increment(data.length); + } else { + self.logger.debug(util.format('Upload data from %s bytes to %s bytes to blob %s', range.start, range.end, blob)); + operation = new BatchOperation.RestOperation(self, apiName, container, blob, data, null, range.start, range.end, rangeOptions, function (error) { + if (!error) { + autoIncrement(); + } else { + self.logger.debug(util.format('Stop uploading data as error happens. Error: %s', util.inspect(error))); + chunkStream.stop(); + } + allocator.releaseBuffer(data); + data = null; + }); + } + }; + + var uploadAppendBlobChunk = function () { + if (!checkLengthLimit()) return; + + rangeOptions.appendPosition = Number(rangeOptions.initialAppendPosition) + Number(range.start); + + // We cannot differentiate between max size condition failing only in the retry versus failing in the first attempt and retry. + // So we will eliminate the latter and handle the former in the append operation callback. + if (options.maxBlobSize && rangeOptions.appendPosition + data.length > options.maxBlobSize) { + throw new Error(SR.MAX_BLOB_SIZE_CONDITION_NOT_MEET); + } + + operation = new BatchOperation.RestOperation(self, apiName, container, blob, data, rangeOptions, function (error, currentBlob) { + if (!error) { + autoIncrement(); + } else { + self.logger.debug(util.format('Stop uploading data as error happens. 
Error: %s', util.inspect(error))); + chunkStream.stop(); + } + blobResult = currentBlob; + allocator.releaseBuffer(data); + data = null; + }); + }; + + if (isBlockBlobUpload) { + uploadBlockBlobChunk(); + } else if (isAppendBlobUpload) { + uploadAppendBlobChunk(); + } else if (isPageBlobUpload) { + uploadPageBlobChunk(); + } + + if (operation) { + full = batchOperations.addOperation(operation); + operation = null; + + if (full) { + self.logger.debug('File stream paused'); + chunkStream.pause(); + } + } + }); + + chunkStream.on('end', function () { + self.logger.debug(util.format('File read stream ended for blob %s', blob)); + batchOperations.enableComplete(); + }); + + batchOperations.on('drain', function () { + self.logger.debug('file stream resume'); + chunkStream.resume(); + }); + + batchOperations.on('end', function (error) { + self.logger.debug('batch operations commited'); + + speedSummary = null; + if (error) { + callback(error); + return; + } + + if (originalContentMD5) { + options.contentSettings.contentMD5 = originalContentMD5; + } else if (options.storeBlobContentMD5) { + var contentMD5 = chunkStream.getContentMd5('base64'); + azureutil.setObjectInnerPropertyValue(options, ['contentSettings', 'contentMD5'], contentMD5); + } + + if (isBlockBlobUpload) { + //commit block list + var blockList = { 'UncommittedBlocks': blockIds }; + self.commitBlocks(container, blob, blockList, options, function (error, blockList, response) { + self.logger.debug(util.format('Blob %s committed', blob)); + + if (error) { + chunkStream.finish(); + + callback(error); + } else { + blobResult['commmittedBlocks'] = blockIds; + + chunkStream.finish(); + callback(error, blobResult, response); + } + }); + } else { + // upload page blob or append blob completely + var blobProperties = options.contentSettings; + self.setBlobProperties(container, blob, blobProperties, function (error, blob, response) { + chunkStream.finish(); + blob = extend(false, blob, blobResult); + callback(error, 
blob, response); + }); + } + }); + + return speedSummary; +}; + +/** +* Checks whether or not a container exists on the service. +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} primaryOnly If true, the request will be executed against the primary storage location. +* @param {object} [options] The request options. +* @param {string} [options.leaseId] The lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, result, response)} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the container information including `exists` boolean member. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype._doesContainerExist = function (container, primaryOnly, options, callback) { + var webResource = WebResource.head(container) + .withQueryOption(QueryStringConstants.RESTYPE, 'container') + .withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + if (primaryOnly === false) { + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + } + + var processResponseCallback = function (responseObject, next) { + responseObject.containerResult = new ContainerResult(container); + if (!responseObject.error) { + responseObject.containerResult.exists = true; + responseObject.containerResult.getPropertiesFromHeaders(responseObject.response.headers); + + } else if (responseObject.error && responseObject.error.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound) { + responseObject.error = null; + responseObject.containerResult.exists = false; + responseObject.response.isSuccessful = true; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.containerResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Checks whether or not a blob exists on the service. +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} primaryOnly If true, the request will be executed against the primary storage location. +* @param {object} [options] The request options. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, result, response)} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the blob information including `exists` boolean member. +* `response` will contain information related to this operation. 
+*/ +BlobService.prototype._doesBlobExist = function (container, blob, primaryOnly, options, callback) { + var resourceName = createResourceName(container, blob); + var webResource = WebResource.head(resourceName) + .withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId) + .withHeader(HeaderConstants.LEASE_ID, options.leaseId); + + if (primaryOnly === false) { + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + } + + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = new BlobResult(container, blob); + if (!responseObject.error) { + responseObject.blobResult.exists = true; + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + + } else if (responseObject.error && responseObject.error.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound) { + responseObject.error = null; + responseObject.blobResult.exists = false; + responseObject.response.isSuccessful = true; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* @ignore +*/ +BlobService.prototype._setBlobPropertiesHelper = function (settings) { + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = null; + if (!responseObject.error) { + responseObject.blobResult = new BlobResult(settings.container, settings.blob); + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + settings.callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(settings.webResource, null, settings.options, processResponseCallback); +}; + +/** +* @ignore +*/ 
+BlobService.prototype._validateLengthAndMD5 = function (options, responseObject) { + var storedMD5 = responseObject.response.headers[Constants.HeaderConstants.CONTENT_MD5]; + var contentLength; + + if (!azureutil.objectIsNull(responseObject.response.headers[Constants.HeaderConstants.CONTENT_LENGTH])) { + contentLength = parseInt(responseObject.response.headers[Constants.HeaderConstants.CONTENT_LENGTH], 10); + } + + // If the user has not specified this option, the default value should be false. + if (azureutil.objectIsNull(options.disableContentMD5Validation)) { + options.disableContentMD5Validation = false; + } + + // None of the below cases should be retried. So set the error in every case so the retry policy filter handle knows that it shouldn't be retried. + if (options.disableContentMD5Validation === false && options.useTransactionalMD5 === true && azureutil.objectIsNull(storedMD5)) { + responseObject.error = new StorageError(SR.MD5_NOT_PRESENT_ERROR); + responseObject.retryable = false; + } + + // Validate length and if required, MD5. + // If getBlobToText called this method, then the responseObject.length and responseObject.contentMD5 are not set. Calculate them first using responseObject.response.body and then validate. 
+ if (azureutil.objectIsNull(responseObject.length)) { + if (typeof responseObject.response.body == 'string') { + responseObject.length = Buffer.byteLength(responseObject.response.body); + } else if (Buffer.isBuffer(responseObject.response.body)) { + responseObject.length = responseObject.response.body.length; + } + } + + if (!azureutil.objectIsNull(contentLength) && responseObject.length !== contentLength) { + responseObject.error = new Error(SR.CONTENT_LENGTH_MISMATCH); + responseObject.retryable = false; + } + + if (options.disableContentMD5Validation === false && azureutil.objectIsNull(responseObject.contentMD5)) { + responseObject.contentMD5 = azureutil.getContentMd5(responseObject.response.body); + } + + if (options.disableContentMD5Validation === false && !azureutil.objectIsNull(storedMD5) && storedMD5 !== responseObject.contentMD5) { + responseObject.error = new Error(util.format(SR.HASH_MISMATCH, storedMD5, responseObject.contentMD5)); + responseObject.retryable = false; + } +}; + +/** +* @ignore +*/ +BlobService.prototype._setRangeContentMD5Header = function (webResource, options) { + if (!azureutil.objectIsNull(options.rangeStart) && options.useTransactionalMD5) { + if (azureutil.objectIsNull(options.rangeEnd)) { + throw new ArgumentNullError('options.rangeEndHeader', util.format(SR.ARGUMENT_NULL_OR_EMPTY, options.rangeEndHeader)); + } + + var size = parseInt(options.rangeEnd, 10) - parseInt(options.rangeStart, 10) + 1; + if (size > BlobConstants.MAX_RANGE_GET_SIZE_WITH_MD5) { + throw new ArgumentError('options', SR.INVALID_RANGE_FOR_MD5); + } else { + webResource.withHeader(HeaderConstants.RANGE_GET_CONTENT_MD5, 'true'); + } + } +}; + +/** +* Downloads a blockblob, pageblob or appendblob into a range stream. +* @ignore +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} blobType The type of blob to download: block blob, page blob or append blob. 
+* @param {Writable} writeStream The Node.js Writable stream. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.rangeStart] Return only the bytes of the blob in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the blob in the specified range. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading blobs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype._getBlobToRangeStream = function (container, blob, blobType, writeStream, optionsOrCallback, callback) { + var options; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; }); + + validate.validateArgs('_getBlobToRangeStream', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.blobNameIsValid(container, blob); + v.blobTypeIsValid(blobType); + v.callback(callback); + }); + + var rangeStream = null; + var isPageBlobDownload = true; + + if (blobType == BlobConstants.BlobTypes.PAGE) { + rangeStream = new PageRangeStream(this, container, blob, options); + } else if (blobType == BlobConstants.BlobTypes.APPEND) { + rangeStream = new RangeStream(this, container, blob, options); + isPageBlobDownload = false; + } else if (blobType == BlobConstants.BlobTypes.BLOCK) { + rangeStream = new BlockRangeStream(this, container, blob, options); + isPageBlobDownload = false; + } + + if (!options.speedSummary) { + options.speedSummary = new SpeedSummary(blob); + } + + var speedSummary = options.speedSummary; + var parallelOperationThreadCount = options.parallelOperationThreadCount || this.parallelOperationThreadCount; + var batchOperations = new BatchOperation('getBlobInRanges', { callbackInOrder: true, logger: this.logger, enableReuseSocket: this.defaultEnableReuseSocket }); + batchOperations.setConcurrency(parallelOperationThreadCount); + + var self = this; + var checkMD5sum = !options.disableContentMD5Validation; + var md5Hash = null; + if 
(checkMD5sum) { + md5Hash = new Md5Wrapper().createMd5Hash(); + } + + var savedBlobResult = null; + var savedBlobResponse = null; + + rangeStream.on('range', function (range) { + if (!speedSummary.totalSize) { + speedSummary.totalSize = rangeStream.rangeSize; + } + + var requestOptions = { + rangeStart: range.start, + rangeEnd: range.end, + responseEncoding: null //Use Buffer to store the response data + }; + + var rangeSize = range.size; + requestOptions.timeoutIntervalInMs = options.timeoutIntervalInMs; + requestOptions.clientRequestTimeoutInMs = options.clientRequestTimeoutInMs; + requestOptions.useTransactionalMD5 = options.useTransactionalMD5; + requestOptions.snapshotId = options.snapshotId; + + if (range.dataSize === 0) { + if (isPageBlobDownload) { + var autoIncrement = speedSummary.getAutoIncrementFunction(rangeSize); + //No operation to do and only wait for write zero to file in callback + var writeZeroOperation = new BatchOperation.CommonOperation(BatchOperation.noOperation, function (error) { + if (error) return; + var bufferAvailable = azureutil.writeZerosToStream(writeStream, rangeSize, md5Hash, autoIncrement); + //There is no need to pause the rangestream since we can perform http request and write disk at the same time + self.logger.debug(util.format('Write %s bytes Zero from %s to %s', rangeSize, range.start, range.end)); + if (!bufferAvailable) { + self.logger.debug('Write stream is full and pause batch operation'); + batchOperations.pause(); + } + }); + batchOperations.addOperation(writeZeroOperation); + } else { + self.logger.debug(util.format('Can not read %s bytes to %s bytes of blob %s', range.start, range.end, blob)); + } + return; + } + + if (range.start > range.end) { + return; + } + + var operation = new BatchOperation.RestOperation(self, 'getBlobToText', container, blob, requestOptions, function (error, content, blobResult, response) { + if (!error) { + if (rangeSize !== content.length) { + self.logger.warn(util.format('Request %s bytes, 
but server returns %s bytes', rangeSize, content.length)); + } + //Save one of the succeeded callback parameters and use them at the final callback + if (!savedBlobResult) { + savedBlobResult = blobResult; + } + if (!savedBlobResponse) { + savedBlobResponse = response; + } + var autoIncrement = speedSummary.getAutoIncrementFunction(content.length); + var bufferAvailable = writeStream.write(content, autoIncrement); + if (!bufferAvailable) { + self.logger.debug('Write stream is full and pause batch operation'); + batchOperations.pause(); + } + if (md5Hash) { + md5Hash.update(content); + } + content = null; + } else { + self.logger.debug(util.format('Stop downloading data as error happens. Error: %s', util.inspect(error))); + rangeStream.stop(); + } + }); + + var full = batchOperations.addOperation(operation); + if (full) { + self.logger.debug('Pause range stream'); + rangeStream.pause(); + } + }); + + rangeStream.on('end', function () { + self.logger.debug('Range stream has ended.'); + batchOperations.enableComplete(); + }); + + batchOperations.on('drain', function () { + self.logger.debug('Resume range stream'); + rangeStream.resume(); + }); + + writeStream.on('drain', function () { + self.logger.debug('Resume batch operations'); + batchOperations.resume(); + }); + + batchOperations.on('end', function (error) { + self.logger.debug('Download completed!'); + if (error) { + callback(error); + return; + } else { + writeStream.end(function () { + self.logger.debug('Write stream has ended'); + if (!savedBlobResult) { + savedBlobResult = {}; + } + + azureutil.setObjectInnerPropertyValue(savedBlobResult, ['contentSettings', 'contentMD5'], azureutil.tryGetValueChain(options, ['contentSettings', 'contentMD5'], null)); + savedBlobResult.clientSideContentMD5 = null; + if (md5Hash) { + savedBlobResult.clientSideContentMD5 = md5Hash.digest('base64'); + } + callback(error, savedBlobResult, savedBlobResponse); + }); + } + }); + + var listOptions = { + timeoutIntervalInMs: 
options.timeoutIntervalInMs, + clientRequestTimeoutInMs: options.clientRequestTimeoutInMs, + snapshotId: options.snapshotId, + leaseId: options.leaseId, + blockListFilter: BlobUtilities.BlockListFilter.COMMITTED + }; + + rangeStream.list(listOptions, function (error) { + callback(error); + }); + + return speedSummary; +}; + +/** +* Downloads a blockblob or pageblob into a stream. +* @ignore +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {Writable} writeStream The Node.js Writable stream. +* @param {object} [options] The request options. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.rangeStart] Return only the bytes of the blob in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the blob in the specified range. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading blobs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the blob information. +* `response` will contain information related to this operation. +*/ +BlobService.prototype._getBlobToStream = function (container, blob, writeStream, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + var resourceName = createResourceName(container, blob); + var webResource = WebResource.get(resourceName).withRawResponse(); + + var options = extend(true, {}, userOptions); + webResource.withQueryOption(QueryStringConstants.SNAPSHOT, options.snapshotId); + + BlobResult.setHeadersFromBlob(webResource, options); + + this._setRangeContentMD5Header(webResource, options); + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.blobResult = null; + + if (!responseObject.error) { + responseObject.blobResult = new BlobResult(container, blob); + responseObject.blobResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.blobResult.getPropertiesFromHeaders(responseObject.response.headers); + + self._validateLengthAndMD5(options, responseObject); + + if (options.speedSummary) { + options.speedSummary.increment(responseObject.length); + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.blobResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + 
this.performRequestInputStream(webResource, null, writeStream, options, processResponseCallback); +}; + +/** +* Lists a segment containing a collection of blob items whose names begin with the specified prefix in the container. +* @ignore +* @this {BlobService} +* @param {string} container The container name. +* @param {string} prefix The prefix of the blob name. +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {ListBlobTypes} listBlobType Specifies the item type of the results. +* @param {object} [options] The request options. +* @param {int} [options.maxResults] Specifies the maximum number of blobs to return per call to Azure ServiceClient. This does NOT affect list size returned by this function. (maximum: 5000) +* @param {string} [options.include] Specifies that the response should include one or more of the following subsets: '', 'metadata', 'snapshots', 'uncommittedblobs', 'copy', 'deleted'). +* Please find these values in BlobUtilities.BlobListingDetails. Multiple values can be added separated with a comma (,). +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the entries of blobs and the continuation token for the next listing operation. +* `response` will contain information related to this operation. +*/ +BlobService.prototype._listBlobsOrDirectoriesSegmentedWithPrefix = function (container, prefix, currentToken, listBlobType, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('listBlobsSegmented', function (v) { + v.string(container, 'container'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.get(container) + .withQueryOption(QueryStringConstants.RESTYPE, 'container') + .withQueryOption(QueryStringConstants.COMP, 'list') + .withQueryOption(QueryStringConstants.MAX_RESULTS, options.maxResults) + .withQueryOptions(options, + QueryStringConstants.DELIMITER, + QueryStringConstants.INCLUDE); + + if (!azureutil.objectIsNull(currentToken)) { + webResource.withQueryOption(QueryStringConstants.MARKER, currentToken.nextMarker); + } + + webResource.withQueryOption(QueryStringConstants.PREFIX, prefix); + + options.requestLocationMode = azureutil.getNextListingLocationMode(currentToken); + + var processResponseCallback = function (responseObject, next) { + responseObject.listBlobsResult = null; + if (!responseObject.error) { + responseObject.listBlobsResult = { + entries: null, + continuationToken: null + }; + + responseObject.listBlobsResult.entries = []; + var results = []; + + if 
(listBlobType == BlobConstants.ListBlobTypes.Directory && responseObject.response.body.EnumerationResults.Blobs.BlobPrefix) { + results = responseObject.response.body.EnumerationResults.Blobs.BlobPrefix; + if (!_.isArray(results)) { + results = [results]; + } + } else if (listBlobType == BlobConstants.ListBlobTypes.Blob && responseObject.response.body.EnumerationResults.Blobs.Blob) { + results = responseObject.response.body.EnumerationResults.Blobs.Blob; + if (!_.isArray(results)) { + results = [results]; + } + } + + results.forEach(function (currentBlob) { + var blobResult = BlobResult.parse(currentBlob); + responseObject.listBlobsResult.entries.push(blobResult); + }); + + if (responseObject.response.body.EnumerationResults.NextMarker) { + responseObject.listBlobsResult.continuationToken = { + nextMarker: null, + targetLocation: null + }; + + responseObject.listBlobsResult.continuationToken.nextMarker = responseObject.response.body.EnumerationResults.NextMarker; + responseObject.listBlobsResult.continuationToken.targetLocation = responseObject.targetLocation; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.listBlobsResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Create a new blob. +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {BlobType} blobType The blob type. +* @param {int} size The blob size. +* @param {object} [options] The request options. +* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. +* @param {errorOrResult} callback The callback which operates on the specific blob. 
+*/ +BlobService.prototype._createBlob = function (container, blob, blobType, size, options, creationCallback) { + if (blobType == BlobConstants.BlobTypes.APPEND) { + this.createOrReplaceAppendBlob(container, blob, options, function (createError, createResponse) { + creationCallback(createError, null, createResponse); + }); + } else if (blobType == BlobConstants.BlobTypes.PAGE) { + this.createPageBlob(container, blob, size, options, function (createError) { + creationCallback(createError); + }); + } else if (blobType == BlobConstants.BlobTypes.BLOCK) { + creationCallback(); + } +}; + +/** +* The callback for {BlobService~getBlobToText}. +* @typedef {function} BlobService~blobToText +* @param {object} error If an error occurs, the error information. +* @param {string} text The text returned from the blob. +* @param {object} blockBlob Information about the blob. +* @param {object} response Information related to this operation. +*/ + +BlobService.SpeedSummary = SpeedSummary; + +module.exports = BlobService; + + +/***/ }), + +/***/ 50716: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. 
+var azureCommon = __nccwpck_require__(18418); +var BlobService = __nccwpck_require__(10210); +var extend = __nccwpck_require__(99237); +var fs = __nccwpck_require__(57147); +var mime = __nccwpck_require__(46210); + +var azureutil = azureCommon.util; +var Constants = azureCommon.Constants; +var FileReadStream = azureCommon.FileReadStream; +var SpeedSummary = azureCommon.SpeedSummary; +var validate = azureCommon.validate; +var BlobConstants = Constants.BlobConstants; + +/** +* Downloads a blob into a file. +* (Not available in the JavaScript Client Library for Browsers) +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} localFileName The local path to the file to be downloaded. +* @param {object} [options] The request options. +* @param {boolean} [options.skipSizeCheck] Skip the size check to perform direct download. +* Set the option to true for small blobs. +* Parallel download and speed summary won't work with this option on. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.rangeStart] Return only the bytes of the blob in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the blob in the specified range. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading blobs. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link BlobResult}` will contain the blob information. +* `response` will contain information related to this operation. 
+* @return {SpeedSummary} +* +* @example +* var azure = require('azure-storage'); +* var blobService = azure.createBlobService(); +* blobService.getBlobToLocalFile('taskcontainer', 'task1', 'task1-download.txt', function(error, serverBlob) { +* if(!error) { +* // Blob available in serverBlob.blob variable +* } +*/ +BlobService.prototype.getBlobToLocalFile = function (container, blob, localFileName, optionsOrCallback, callback) { + var options; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; }); + + validate.validateArgs('getBlobToLocalFile', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.string(localFileName, 'localFileName'); + v.containerNameIsValid(container); + v.callback(callback); + }); + + return this._getBlobToLocalFile(container, blob, localFileName, options, callback); +}; + +/** +* Uploads a page blob from file. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* (Not available in the JavaScript Client Library for Browsers) +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} localFileName The local path to the file to be uploaded. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The upload tracker objects. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. 
+* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. +* The default value is false for page blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.createPageBlobFromLocalFile = function (container, blob, localFileName, optionsOrCallback, callback) { + return this._createBlobFromLocalFile(container, blob, BlobConstants.BlobTypes.PAGE, localFileName, optionsOrCallback, callback); +}; + +/** +* Creates a new block blob. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* (Not available in the JavaScript Client Library for Browsers) +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} localFileName The local path to the file to be uploaded. +* @param {object} [options] The request options. +* @param {int} [options.blockSize] The size of each block. Maximum is 100MB. +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] The MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. 
+* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.createBlockBlobFromLocalFile = function (container, blob, localFileName, optionsOrCallback, callback) { + return this._createBlobFromLocalFile(container, blob, BlobConstants.BlobTypes.BLOCK, localFileName, optionsOrCallback, callback); +}; + +/** +* Creates a new append blob from a local file. If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* If you want to append data to an already existing blob, please look at appendFromLocalFile. +* (Not available in the JavaScript Client Library for Browsers) +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} localFileName The local path to the file to be uploaded. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {string} [options.leaseId] The lease identifier. 
+* @param {object} [options.metadata] The metadata key/value pairs. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 ahash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +BlobService.prototype.createAppendBlobFromLocalFile = function (container, blob, localFileName, optionsOrCallback, callback) { + return this._createBlobFromLocalFile(container, blob, BlobConstants.BlobTypes.APPEND, localFileName, optionsOrCallback, callback); +}; + +/** +* Appends to an append blob from a local file. Assumes the blob already exists on the service. +* This API should be used strictly in a single writer scenario because the API internally uses the append-offset conditional header to avoid duplicate blocks. +* If you are guaranteed to have a single writer scenario, please look at options.absorbConditionalErrorsOnRetry and see if setting this flag to true is acceptable for you. +* (Not available in the JavaScript Client Library for Browsers) +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} localFileName The local path to the file to be uploaded. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. +* @param {string} [options.leaseId] The lease identifier. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. 
The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The blob's MD5 hash. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link BlobResult}` will contain +* the blob information. +* `response` will contain information related to this operation. 
+* @return {SpeedSummary} +*/ +BlobService.prototype.appendFromLocalFile = function (container, blob, localFileName, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('appendFromLocalFile', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.string(localFileName, 'localFileName'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.speedSummary = options.speedSummary || new SpeedSummary(blob); + + var self = this; + fs.stat(localFileName, function (error, stat) { + if (error) { + callback(error); + } else { + var stream = new FileReadStream(localFileName, { calcContentMd5: options.storeBlobContentMD5 }); + var streamCallback = function (appendError, blob, response) { + if (azureutil.objectIsFunction(stream.destroy)) { + stream.destroy(); + } + callback(appendError, blob, response); + }; + + try { + self._uploadBlobFromStream(false, container, blob, BlobConstants.BlobTypes.APPEND, stream, stat.size, options, streamCallback); + } catch (err) { + callback(err); + } + } + }); + + return options.speedSummary; +}; + +// Private methods + +/** +* Creates a new blob (Block/Page/Append). If the blob already exists on the service, it will be overwritten. +* To avoid overwriting and instead throw an error if the blob exists, please pass in an accessConditions parameter in the options object. +* +* @ignore +* +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {BlobType} blobType The blob type. +* @param {string} localFileName The local path to the file to be uploaded. +* @param {object} [options] The request options. +* @param {bool} [options.absorbConditionalErrorsOnRetry] Specifies whether to absorb the conditional error on retry. 
(For append blob only) +* @param {int} [options.blockSize] The size of each block. Maximum is 100MB. +* @param {string} [options.blockIdPrefix] The prefix to be used to generate the block id. (For block blob only) +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.transactionalContentMD5] An MD5 hash of the blob content. This hash is used to verify the integrity of the blob during transport. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.storeBlobContentMD5] Specifies whether the blob's ContentMD5 header should be set on uploads. The default value is true for block blobs. +* @param {string} [options.blobTier] For page blobs on premium accounts only. Set the tier of the target blob. Refer to BlobUtilities.BlobTier.PremiumPageBlobTier. +* @param {object} [options.contentSettings] The content settings of the blob. +* @param {string} [options.contentSettings.contentType] The MIME content type of the blob. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the blob. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The Blob service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The blob's content disposition. +* @param {string} [options.contentSettings.contentMD5] The MD5 hash of the blob content. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback The callback function. +* +* @return {SpeedSummary} +* +*/ +BlobService.prototype._createBlobFromLocalFile = function (container, blob, blobType, localFileName, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('_createBlobFromLocalFile', function (v) { + v.string(container, 'container'); + v.string(blob, 'blob'); + v.containerNameIsValid(container); + v.blobTypeIsValid(blobType); + v.string(localFileName, 'localFileName'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.speedSummary = options.speedSummary || new SpeedSummary(blob); + + var self = this; + var size = 0; + + var creationCallback = function (createError, createBlob, createResponse) { + if (createError) { + callback(createError, createBlob, createResponse); + } else { + // Automatically detect the mime type + if(azureutil.tryGetValueChain(options, ['contentSettings','contentType'], undefined) === 
undefined) { + azureutil.setObjectInnerPropertyValue(options, ['contentSettings','contentType'], mime.lookup(localFileName)); + } + + var stream = new FileReadStream(localFileName, { calcContentMd5: options.storeBlobContentMD5 }); + var streamCallback = function (createError, createBlob, createResponse) { + if (azureutil.objectIsFunction(stream.destroy)) { + stream.destroy(); + } + callback(createError, createBlob, createResponse); + }; + self._uploadBlobFromStream(true, container, blob, blobType, stream, size, options, streamCallback); + } + }; + + // Check the file size to determine the upload method: single request or chunks + fs.stat(localFileName, function (error, stat) { + if (error) { + callback(error); + } else { + size = stat.size; + try { + self._createBlob(container, blob, blobType, size, options, creationCallback); + } catch (err) { + callback(err); + } + } + }); + + return options.speedSummary; +}; + +/** +* Downloads a blob into a file. +* @ignore +* @this {BlobService} +* @param {string} container The container name. +* @param {string} blob The blob name. +* @param {string} localFileName The local path to the file to be downloaded. +* @param {object} [options] The request options. +* @param {boolean} [options.skipSizeCheck] Skip the size check to perform direct download. +* Set the option to true for small blobs. +* Parallel download and speed summary won't work with this option on. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {string} [options.snapshotId] The snapshot identifier. +* @param {string} [options.leaseId] The lease identifier. +* @param {string} [options.rangeStart] Return only the bytes of the blob in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the blob in the specified range. 
+* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading blobs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the blob information. +* `response` will contain information related to this operation. 
+* @return {SpeedSummary} +* +*/ +BlobService.prototype._getBlobToLocalFile = function (container, blob, localFileName, optionsOrCallback, callback) { + var options; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { options = o; callback = c; }); + options.speedSummary = options.speedSummary || new SpeedSummary(blob); + + var writeStream = fs.createWriteStream(localFileName, { 'highWaterMark': BlobConstants.MAX_QUEUED_WRITE_DISK_BUFFER_SIZE }); + writeStream.on('error', function (error) { + callback(error); + }); + + this.getBlobToStream(container, blob, writeStream, options, function (error, responseBlob, response) { + if (error) { + var onErrorCallback = function() { + // If the download failed from the beginning, remove the file. + if (fs.existsSync(localFileName) && writeStream.bytesWritten === 0) { + fs.unlinkSync(localFileName); + } + callback(error, responseBlob, response); + }; + if (!writeStream.closed) { + writeStream.end(onErrorCallback); + } else { + onErrorCallback(); + } + } else { + callback(error, responseBlob, response); + } + }); + + return options.speedSummary; +}; + +module.exports = BlobService; + +/***/ }), + +/***/ 89959: +/***/ ((module, exports) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Expose 'BlobUtilities'. 
+exports = module.exports; + +/** +* Defines constants, enums, and utility functions for use with the Blob service. +* @namespace BlobUtilities +*/ +var BlobUtilities = { + /** + * Permission types + * + * @const + * @enum {string} + */ + SharedAccessPermissions: { + READ: 'r', + ADD: 'a', + CREATE: 'c', + WRITE: 'w', + DELETE: 'd', + LIST: 'l' + }, + + /** + * Blob listing details. + * + * @const + * @enum {string} + */ + BlobListingDetails: { + SNAPSHOTS: 'snapshots', + METADATA: 'metadata', + UNCOMMITTED_BLOBS: 'uncommittedblobs', + COPY: 'copy', + DELETED: 'deleted' + }, + + /** + * Deletion options for blob snapshots + * + * @const + * @enum {string} + */ + SnapshotDeleteOptions: { + SNAPSHOTS_ONLY: 'only', + BLOB_AND_SNAPSHOTS: 'include' + }, + + /** + * Type of block list to retrieve + * + * @const + * @enum {string} + */ + BlockListFilter: { + ALL: 'all', + COMMITTED: 'committed', + UNCOMMITTED: 'uncommitted' + }, + + /** + * Blobs and container public access types. + * + * @const + * @enum {string} + */ + BlobContainerPublicAccessType: { + OFF: null, + CONTAINER: 'container', + BLOB: 'blob' + }, + + /** + * Describes actions that can be performed on a page blob sequence number. + * @const + * @enum {string} + */ + SequenceNumberAction: { + MAX: 'max', + UPDATE: 'update', + INCREMENT: 'increment' + }, + + /** + * Candidate values for blob tiers. + * + * @property {object} PremiumPageBlobTier Candidate values for premium pageblob tiers. + * @property {string} PremiumPageBlobTier.P4 + * @property {string} PremiumPageBlobTier.P6 + * @property {string} PremiumPageBlobTier.P10 + * @property {string} PremiumPageBlobTier.P20 + * @property {string} PremiumPageBlobTier.P30 + * @property {string} PremiumPageBlobTier.P40 + * @property {string} PremiumPageBlobTier.P50 + * @property {string} PremiumPageBlobTier.P60 + * @property {object} StandardBlobTier Candidate values for standard blobs tiers. 
+ * @property {string} StandardBlobTier.HOT + * @property {string} StandardBlobTier.COOL + * @property {string} StandardBlobTier.ARCHIVE + */ + BlobTier: { + PremiumPageBlobTier: { + P4: 'P4', + P6: 'P6', + P10: 'P10', + P20: 'P20', + P30: 'P30', + P40: 'P40', + P50: 'P50', + P60: 'P60' + }, + StandardBlobTier: { + HOT: 'Hot', + COOL: 'Cool', + ARCHIVE: 'Archive' + } + } +}; + +module.exports = BlobUtilities; + +/***/ }), + +/***/ 99032: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +var Constants = __nccwpck_require__(60658); +var EventEmitter = (__nccwpck_require__(82361).EventEmitter); +var BlobUtilities = __nccwpck_require__(89959); + +/** +* BlockBlob block range stream +*/ +function BlockRangeStream(blobServiceClient, container, blob, options) { + this.blobServiceClient = blobServiceClient; + this.container = container; + this.blob = blob; + this._emitter = new EventEmitter(); + this._paused = false; + this._emittedAll = false; + this._emittedRangeType = null; + this._emittedRangeIndex = null; + this._offset = 0; + this._rangelist = []; + this._isEmitting = false; + if (options.rangeStart) { + this._startOffset = options.rangeStart; + } else { + this._startOffset = 0; + } + if (options.rangeEnd) { + this._endOffset = options.rangeEnd; + } else { + this._endOffset = Number.MAX_VALUE; + } +} + +/** +* Add event listener +*/ +BlockRangeStream.prototype.on = function (event, listener) { + this._emitter.on(event, listener); +}; + +/** +* Get block list +*/ +BlockRangeStream.prototype.list = function (options, callback) { + if (!options) { + options = {}; + } + + if (!options.blockListFilter) { + options.blockListFilter = BlobUtilities.BlockListFilter.ALL; + } + + var self = this; + this.blobServiceClient.listBlocks(this.container, this.blob, options.blockListFilter, options, function (error, blocklist, response) { + if (error) { + callback(error); + } else { + var totalSize = parseInt(response.headers[Constants.HeaderConstants.BLOB_CONTENT_LENGTH], 10); + if (!blocklist.CommittedBlocks) { + //Convert single block blob to block blob range + var name = 'NODESDK_BLOCKBLOB_RANGESTREAM'; + blocklist.CommittedBlocks = [{ Name : name, Size : totalSize }]; + } + + self._rangelist = blocklist; + self._emitBlockList(); + self = blocklist = null; + } + }); +}; + +/** +* Emit block ranges +*/ +BlockRangeStream.prototype._emitBlockList = function () { + if (this._paused || this._emittedAll || this._isEmitting) return; + + var self = this; + 
this._getTypeList(function () { + self._rangelist = null; + self._emittedAll = true; + self._emitter.emit('end'); + }); +}; + +/** +* Get the block type list +*/ +BlockRangeStream.prototype._getTypeList = function (callback) { + this._isEmitting = true; + try { + var typeStart = false; + if (this._rangelist) { + for (var blockType in this._rangelist) { + if (this._rangelist.hasOwnProperty(blockType)) { + if (this._emittedRangeType === null || typeStart || this._emittedRangeType == blockType) { + this._emittedRangeType = blockType; + typeStart = true; + } else if (this._emittedRangeType !== blockType) { + continue; + } + + if (this._paused) { + return; + } + + this._emitBlockRange (blockType, callback); + } + } + } + } finally { + this._isEmitting = false; + } +}; + +/** +* Get the block list +*/ +BlockRangeStream.prototype._emitBlockRange = function (blockType, callback) { + var blockList = this._rangelist[blockType]; + var indexStart = false; + for (var blockIndex = 0; blockIndex < blockList.length; blockIndex++) { + if (this._emittedRangeIndex === null || indexStart || this._emittedRangeIndex === blockIndex) { + this._emittedRangeIndex = blockIndex; + indexStart = true; + } else if (this._emittedRangeIndex !== blockIndex) { + continue; + } + + if (this._paused) { + return; + } + + var range = blockList[blockIndex]; + // follow the same naming convention of page ranges and json + range.name = range.Name; + range.type = blockType; + range.start = this._offset; + this._offset += parseInt(range.Size, 10); + range.end = this._offset - 1; + delete range.Name; + delete range.Size; + + if (range.start > this._endOffset) { + break; + } else if (range.end < this._startOffset) { + continue; + } else { + range.start = Math.max(range.start, this._startOffset); + range.end = Math.min(range.end, this._endOffset); + range.size = range.end - range.start + 1; + range.dataSize = range.size; + this._emitter.emit('range', range); + } + } + + // remove the used range and avoid memory 
leak + this._rangelist[blockType] = null; + + callback(); +}; + +/** +* Pause the stream +*/ +BlockRangeStream.prototype.pause = function () { + this._paused = true; +}; + +/** +* Resume the stream +*/ +BlockRangeStream.prototype.resume = function () { + this._paused = false; + if (!this._isEmitting) { + this._emitBlockList(); + } +}; + +/** +* Stop the stream +*/ +BlockRangeStream.prototype.stop = function () { + this.pause(); + this._emittedAll = true; + this._emitter.emit('end'); +}; + +module.exports = BlockRangeStream; + + +/***/ }), + +/***/ 96051: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +var util = __nccwpck_require__(73837); +var RangeStream = __nccwpck_require__(98664); +var Constants = __nccwpck_require__(60658); + +/** +* PageBlob page range stream +*/ +function PageRangeStream(blobServiceClient, container, blob, options) { + PageRangeStream['super_'].call(this, blobServiceClient, container, blob, options); + + if (options.minRangeSize) { + this._minRangeSize = options.minRangeSize; + } else { + this._minRangeSize = Constants.BlobConstants.MIN_WRITE_PAGE_SIZE_IN_BYTES; + } + if (options.maxRangeSize) { + this._maxRangeSize = options.maxRangeSize; + } else { + this._maxRangeSize = Constants.BlobConstants.DEFAULT_WRITE_PAGE_SIZE_IN_BYTES; + } + this._lengthHeader = Constants.HeaderConstants.BLOB_CONTENT_LENGTH; + this._listFunc = blobServiceClient.listPageRanges; +} + +util.inherits(PageRangeStream, RangeStream); + +module.exports = PageRangeStream; + + +/***/ }), + +/***/ 3128: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var _ = __nccwpck_require__(7404); + +var azureCommon = __nccwpck_require__(82187); +var azureutil = azureCommon.util; +var Constants = azureCommon.Constants; +var HeaderConstants = Constants.HeaderConstants; + +/** +* Creates a new BlobResult object. +* @class +* The BlobResult class is used to store the blob information. 
+* + * @property {string} container The container name. + * @property {string} name The blob name. + * @property {object} metadata The metadata key/value pair. + * @property {string} etag The etag. + * @property {string} lastModified The date/time that the blob was last modified. + * @property {string} contentLength The size of the blob in bytes. + * @property {string} blobType The blob type. + * @property {boolean} isIncrementalCopy If the blob is incremental copy blob. + * @property {string} requestId The request id. + * @property {string} sequenceNumber The current sequence number for a page blob. + * @property {string} contentRange The content range. + * @property {string} committedBlockCount The committed block count. + * @property {string} serverEncrypted If the blob data and application metadata are completely encrypted using the specified algorithm. true/false. + * @property {object} contentSettings The content settings. + * @property {string} contentSettings.contentType The content type. + * @property {string} contentSettings.contentEncoding The content encoding. + * @property {string} contentSettings.contentLanguage The content language. + * @property {string} contentSettings.cacheControl The cache control. + * @property {string} contentSettings.contentDisposition The content disposition. + * @property {string} contentSettings.contentMD5 The content MD5 hash. + * @property {object} lease The lease information. + * @property {string} lease.id The lease id. + * @property {string} lease.status The lease status. + * @property {string} lease.state The lease state. + * @property {string} lease.duration The lease duration. + * @property {object} copy The copy information. + * @property {string} copy.id The copy id. + * @property {string} copy.status The copy status. + * @property {string} copy.completionTime The copy completion time. + * @property {string} copy.statusDescription The copy status description. 
+ * @property {string} copy.destinationSnapshot The snapshot time of the last successful incremental copy snapshot for this blob. + * @property {string} copy.progress The copy progress. + * @property {string} copy.source The copy source. + * +* @constructor +* @param {string} [container] The container name. +* @param {string} [name] The blob name. +*/ +function BlobResult(container, name) { + if (container) { + this.container = container; + } + + if (name) { + this.name = name; + } +} + +BlobResult.parse = function (blobXml) { + var blobResult = new BlobResult(); + + for (var propertyName in blobXml) { + if (blobXml.hasOwnProperty(propertyName)) { + if (propertyName === 'Properties') { + // Lift out the properties onto the main object to keep consistent across all APIs like: getBlobProperties + azureutil.setPropertyValueFromXML(blobResult, blobXml[propertyName], true); + } else if (propertyName === 'Metadata') { + var resultPropertyName = azureutil.normalizePropertyNameFromXML(propertyName); + blobResult[resultPropertyName] = {}; + azureutil.setPropertyValueFromXML(blobResult[resultPropertyName], blobXml[propertyName], false); + } else { + blobResult[propertyName.toLowerCase()] = blobXml[propertyName]; + } + } + } + + if (blobResult.isIncrementalCopy !== undefined) { + blobResult.isIncrementalCopy = (blobResult.isIncrementalCopy === 'true'); + } + + // convert accessTierInferred to boolean type + if (blobResult.accessTierInferred !== undefined) { + blobResult.accessTierInferred = (blobResult.accessTierInferred === 'true'); + } + + if (blobResult.deleted !== undefined) { + blobResult.deleted = (blobResult.deleted == 'true'); + } + + if (blobResult.remainingRetentionDays !== undefined) { + blobResult.remainingRetentionDays = parseInt(blobResult.remainingRetentionDays); + } + + return blobResult; +}; + +var headersForProperties = { + 'lastModified': 'LAST_MODIFIED', + 'creationTime': 'CREATION_TIME', + 'etag': 'ETAG', + 'sequenceNumber': 'SEQUENCE_NUMBER', + 
'blobType': 'BLOB_TYPE', + 'contentLength': 'CONTENT_LENGTH', + 'blobContentLength': 'BLOB_CONTENT_LENGTH', + 'contentRange': 'CONTENT_RANGE', + 'committedBlockCount': 'BLOB_COMMITTED_BLOCK_COUNT', + 'serverEncrypted': 'SERVER_ENCRYPTED', + 'requestId': 'REQUEST_ID', + + 'range': 'RANGE', + 'blobRange': 'STORAGE_RANGE', + 'getContentMd5': 'RANGE_GET_CONTENT_MD5', + 'acceptRanges': 'ACCEPT_RANGES', + 'appendOffset': 'BLOB_APPEND_OFFSET', + + 'accessTier': 'ACCESS_TIER', + 'accessTierChangeTime': 'ACCESS_TIER_CHANGE_TIME', + 'accessTierInferred': 'ACCESS_TIER_INFERRED', + 'archiveStatus': 'ARCHIVE_STATUS', + + 'isIncrementalCopy': 'INCREMENTAL_COPY', + + // ContentSettings + 'contentSettings.contentType': 'CONTENT_TYPE', + 'contentSettings.contentEncoding': 'CONTENT_ENCODING', + 'contentSettings.contentLanguage': 'CONTENT_LANGUAGE', + 'contentSettings.cacheControl': 'CACHE_CONTROL', + 'contentSettings.contentDisposition': 'CONTENT_DISPOSITION', + 'contentSettings.contentMD5': 'CONTENT_MD5', + + // Lease + 'lease.id': 'LEASE_ID', + 'lease.status': 'LEASE_STATUS', + 'lease.duration': 'LEASE_DURATION', + 'lease.state': 'LEASE_STATE', + + // Copy + 'copy.id': 'COPY_ID', + 'copy.status': 'COPY_STATUS', + 'copy.source': 'COPY_SOURCE', + 'copy.progress': 'COPY_PROGRESS', + 'copy.completionTime': 'COPY_COMPLETION_TIME', + 'copy.statusDescription': 'COPY_STATUS_DESCRIPTION', + 'copy.destinationSnapshot': 'COPY_DESTINATION_SNAPSHOT' +}; + +BlobResult.prototype.getPropertiesFromHeaders = function (headers) { + var self = this; + + var setBlobPropertyFromHeaders = function (blobProperty, headerProperty) { + if (!azureutil.tryGetValueChain(self, blobProperty.split('.'), null) && headers[headerProperty.toLowerCase()]) { + azureutil.setObjectInnerPropertyValue(self, blobProperty.split('.'), headers[headerProperty.toLowerCase()]); + + if (blobProperty === 'copy.progress') { + var info = azureutil.parseCopyProgress(self.copy.progress); + self.copy.bytesCopied = 
parseInt(info.bytesCopied); + self.copy.totalBytes = parseInt(info.totalBytes); + } + } + }; + + // For range get, 'x-ms-blob-content-md5' indicate the overall MD5 of the blob. Try to set the contentMD5 using this header if it presents + setBlobPropertyFromHeaders('contentSettings.contentMD5', HeaderConstants.BLOB_CONTENT_MD5); + + _.chain(headersForProperties).pairs().each(function (pair) { + var property = pair[0]; + var header = HeaderConstants[pair[1]]; + setBlobPropertyFromHeaders(property, header); + }); + + // convert isIncrementalCopy to boolean type + if (self.isIncrementalCopy !== undefined) { + self.isIncrementalCopy = (self.isIncrementalCopy === 'true'); + } + + // convert accessTierInferred to boolean type + if (self.accessTierInferred !== undefined) { + self.accessTierInferred = (self.accessTierInferred == 'true'); + } +}; + +/** +* This method sets the HTTP headers and is used by all methods except setBlobProperties and commitBlocks. Those 2 methods will set the x-ms-* headers using setPropertiesFromBlob. 
+* @ignore +*/ +BlobResult.setHeadersFromBlob = function (webResource, blob) { + var setHeaderPropertyFromBlob = function (headerProperty, blobProperty) { + var blobPropertyValue = azureutil.tryGetValueChain(blob, blobProperty.split('.'), null); + if (blobPropertyValue) { + webResource.withHeader(headerProperty, blobPropertyValue); + } + }; + + if (blob) { + // Content-Type + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_TYPE, 'contentSettings.contentType'); + + // Content-Encoding + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_ENCODING, 'contentSettings.contentEncoding'); + + // Content-Language + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_LANGUAGE, 'contentSettings.contentLanguage'); + + // Content-Disposition + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_DISPOSITION, 'contentSettings.contentDisposition'); + + // Cache-Control + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CACHE_CONTROL, 'contentSettings.cacheControl'); + + // Blob's Content-MD5 + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_MD5, 'contentSettings.contentMD5'); + + // Content-Length + setHeaderPropertyFromBlob(HeaderConstants.CONTENT_LENGTH, 'contentLength'); + + // transactional Content-MD5 + setHeaderPropertyFromBlob(HeaderConstants.CONTENT_MD5, 'transactionalContentMD5'); + + // Range + if (!azureutil.objectIsNull(blob.rangeStart)) { + var range = 'bytes=' + blob.rangeStart + '-'; + + if (!azureutil.objectIsNull(blob.rangeEnd)) { + range += blob.rangeEnd; + } + + webResource.withHeader(HeaderConstants.RANGE, range); + } + + // Source Range + if (!azureutil.objectIsNull(blob.sourceRangeStart)) { + var sourceRange = 'bytes=' + blob.sourceRangeStart + '-'; + + if (!azureutil.objectIsNull(blob.sourceRangeEnd)) { + sourceRange += blob.sourceRangeEnd; + } + + webResource.withHeader(HeaderConstants.SOURCE_RANGE, sourceRange); + } + + // Blob Type + setHeaderPropertyFromBlob(HeaderConstants.BLOB_TYPE, 'blobType'); + + // Lease id + 
setHeaderPropertyFromBlob(HeaderConstants.LEASE_ID, 'leaseId'); + + // Sequence number + setHeaderPropertyFromBlob(HeaderConstants.SEQUENCE_NUMBER, 'sequenceNumber'); + setHeaderPropertyFromBlob(HeaderConstants.SEQUENCE_NUMBER_ACTION, 'sequenceNumberAction'); + + if (blob.metadata) { + webResource.addOptionalMetadataHeaders(blob.metadata); + } + } +}; + +/** +* This method sets the x-ms-* headers and is used by setBlobProperties and commitBlocks. All other methods will set the regular HTTP headers using setHeadersFromBlob. +* @ignore +*/ +BlobResult.setPropertiesFromBlob = function (webResource, blob) { + var setHeaderPropertyFromBlob = function (headerProperty, blobProperty) { + var propertyValue = azureutil.tryGetValueChain(blob, blobProperty.split('.'), null); + if (propertyValue) { + webResource.withHeader(headerProperty, propertyValue); + } + }; + + if (blob) { + // Content-Type + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_TYPE, 'contentSettings.contentType'); + + // Content-Encoding + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_ENCODING, 'contentSettings.contentEncoding'); + + // Content-Language + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_LANGUAGE, 'contentSettings.contentLanguage'); + + // Content-Disposition + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_DISPOSITION, 'contentSettings.contentDisposition'); + + // Cache-Control + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CACHE_CONTROL, 'contentSettings.cacheControl'); + + // Content-MD5 + setHeaderPropertyFromBlob(HeaderConstants.BLOB_CONTENT_MD5, 'contentSettings.contentMD5'); + + // Lease id + setHeaderPropertyFromBlob(HeaderConstants.LEASE_ID, 'leaseId'); + + if (blob.metadata) { + webResource.addOptionalMetadataHeaders(blob.metadata); + } + } +}; + +module.exports = BlobResult; + + +/***/ }), + +/***/ 50440: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var _ = __nccwpck_require__(7404); + +var azureCommon = __nccwpck_require__(82187); +var xmlbuilder = azureCommon.xmlbuilder; +var Constants = azureCommon.Constants; + +/** +* Builds an XML representation for a block list. +* +* @param {array} The block list. +* @return {string} The XML block list. +*/ +exports.serialize = function (blockListJs) { + var blockListDoc = xmlbuilder.create(Constants.BlobConstants.BLOCK_LIST_ELEMENT, { version: '1.0', encoding: 'utf-8' }); + + if (_.isArray(blockListJs.LatestBlocks)) { + blockListJs.LatestBlocks.forEach(function (block) { + blockListDoc = blockListDoc.ele(Constants.BlobConstants.LATEST_ELEMENT) + .txt(Buffer.from(block).toString('base64')) + .up(); + }); + } + + if (_.isArray(blockListJs.CommittedBlocks)) { + blockListJs.CommittedBlocks.forEach(function (block) { + blockListDoc = blockListDoc.ele(Constants.BlobConstants.COMMITTED_ELEMENT) + .txt(Buffer.from(block).toString('base64')) + .up(); + }); + } + + if (_.isArray(blockListJs.UncommittedBlocks)) { + blockListJs.UncommittedBlocks.forEach(function (block) { + blockListDoc = blockListDoc.ele(Constants.BlobConstants.UNCOMMITTED_ELEMENT) + .txt(Buffer.from(block).toString('base64')) + .up(); + }); + } + + return blockListDoc.doc().toString(); +}; + +exports.parse = function (blockListXml) { + var blockListResult = {}; + + if (blockListXml.CommittedBlocks && 
blockListXml.CommittedBlocks.Block) { + blockListResult.CommittedBlocks = blockListXml.CommittedBlocks.Block; + if (!_.isArray(blockListResult.CommittedBlocks)) { + blockListResult.CommittedBlocks = [blockListResult.CommittedBlocks]; + } + blockListResult.CommittedBlocks.forEach(function(block) { + block.Name = Buffer.from(block.Name, 'base64').toString(); + }); + } + + if (blockListXml.UncommittedBlocks && blockListXml.UncommittedBlocks.Block) { + blockListResult.UncommittedBlocks = blockListXml.UncommittedBlocks.Block; + if (!_.isArray(blockListResult.UncommittedBlocks)) { + blockListResult.UncommittedBlocks = [blockListResult.UncommittedBlocks]; + } + blockListResult.UncommittedBlocks.forEach(function(block) { + block.Name = Buffer.from(block.Name, 'base64').toString(); + }); + } + + return blockListResult; +}; + +/***/ }), + +/***/ 56983: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var azureCommon = __nccwpck_require__(82187); +var azureutil = azureCommon.util; +var Constants = azureCommon.Constants; + +var HeaderConstants = Constants.HeaderConstants; +var BlobUtilities = __nccwpck_require__(89959); + +/** +* Creates a new ContainerResult object. +* @class +* The ContainerResult class is used to store the container information. +* + * @property {string} name The container name. 
+ * @property {string} publicAccessLevel The public access level. + * @property {object} metadata The metadata key/value pair. + * @property {string} etag The etag. + * @property {string} lastModified The date/time that the container was last modified. + * @property {string} requestId The request id. + * @property {object} lease The lease information. + * @property {string} lease.status The lease status. + * @property {string} lease.state The lease state. + * @property {string} lease.duration The lease duration. + * +* @constructor +* @param {string} [container] The container name. +* @param {string} [publicAccessLevel] The public access level. +*/ +function ContainerResult(name, publicAccessLevel) { + if (name) { + this.name = name; + } + + if (publicAccessLevel) { + this.publicAccessLevel = publicAccessLevel; + } +} + +ContainerResult.parse = function (containerXml) { + var containerResult = new ContainerResult(); + + for (var propertyName in containerXml) { + if (containerXml.hasOwnProperty(propertyName)) { + if (propertyName === 'Properties') { + // Lift out the properties onto the main object to keep consistent across all APIs like: getContainerProperties + azureutil.setPropertyValueFromXML(containerResult, containerXml[propertyName], true); + } else if (propertyName === 'Metadata') { + var resultPropertyName = azureutil.normalizePropertyNameFromXML(propertyName); + containerResult[resultPropertyName] = {}; + azureutil.setPropertyValueFromXML(containerResult[resultPropertyName], containerXml[propertyName], false); + } else { + containerResult[propertyName.toLowerCase()] = containerXml[propertyName]; + } + } + } + + if (!containerResult.publicAccessLevel) { + containerResult.publicAccessLevel = BlobUtilities.BlobContainerPublicAccessType.OFF; + } + + // convert hasImmutabilityPolicy to boolean type + if (containerResult.hasImmutabilityPolicy !== undefined) { + containerResult.hasImmutabilityPolicy = (containerResult.hasImmutabilityPolicy === 'true'); + } + + // 
convert hasLegalHold to boolean type + if (containerResult.hasLegalHold !== undefined) { + containerResult.hasLegalHold = (containerResult.hasLegalHold === 'true'); + } + + return containerResult; +}; + +ContainerResult.prototype.getPropertiesFromHeaders = function (headers) { + var self = this; + + var setContainerPropertyFromHeaders = function (containerProperty, headerProperty) { + if (!azureutil.tryGetValueChain(self, containerProperty.split('.'), null) && headers[headerProperty.toLowerCase()]) { + azureutil.setObjectInnerPropertyValue(self, containerProperty.split('.'), headers[headerProperty.toLowerCase()]); + } + }; + + setContainerPropertyFromHeaders('etag', HeaderConstants.ETAG); + setContainerPropertyFromHeaders('lastModified', HeaderConstants.LAST_MODIFIED); + setContainerPropertyFromHeaders('lease.status', HeaderConstants.LEASE_STATUS); + setContainerPropertyFromHeaders('lease.state', HeaderConstants.LEASE_STATE); + setContainerPropertyFromHeaders('lease.duration', HeaderConstants.LEASE_DURATION); + setContainerPropertyFromHeaders('requestId', HeaderConstants.REQUEST_ID); + setContainerPropertyFromHeaders('hasImmutabilityPolicy', HeaderConstants.HAS_IMMUTABILITY_POLICY); + setContainerPropertyFromHeaders('hasLegalHold', HeaderConstants.HAS_LEGAL_HOLD); + + // convert hasImmutabilityPolicy to boolean type + if (self.hasImmutabilityPolicy !== undefined) { + self.hasImmutabilityPolicy = (self.hasImmutabilityPolicy === 'true'); + } + + // convert hasLegalHold to boolean type + if (self.hasLegalHold !== undefined) { + self.hasLegalHold = (self.hasLegalHold === 'true'); + } + + if (!self.publicAccessLevel) { + self.publicAccessLevel = BlobUtilities.BlobContainerPublicAccessType.OFF; + if (headers[HeaderConstants.BLOB_PUBLIC_ACCESS]) { + self.publicAccessLevel = headers[HeaderConstants.BLOB_PUBLIC_ACCESS]; + } + } + + if (self.publicAccessLevel === 'true') { + // The container was marked for full public read access using a version prior to 2009-09-19. 
+ self.publicAccessLevel = BlobUtilities.BlobContainerPublicAccessType.CONTAINER; + } +}; + +/** +* The container ACL settings. +* @typedef {object} ContainerAclResult +* @extends {ContainerResult} +* @property {Object.} signedIdentifiers The container ACL settings. See `[AccessPolicy]{@link AccessPolicy}` for detailed information. +*/ + +module.exports = ContainerResult; + +/***/ }), + +/***/ 96561: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var Constants = (__nccwpck_require__(82187).Constants); +var HeaderConstants = Constants.HeaderConstants; + + +/** +* Creates a new LeaseResult object. +* @class +* The LeaseResult class is used to store the lease information. +* + * @property {string} container The container name. + * @property {string} blob The blob name. + * @property {string} id The lease id. + * @property {string} time Approximate time remaining in the lease period, in seconds. + * @property {string} etag The etag. + * @property {string} lastModified The date/time that the lease was last modified. + * +* @constructor +* @param {string} [container] The container name. +* @param {string} [blob] The blob name. +* @param {string} [id] The lease id. +* @param {string} [time] Approximate time remaining in the lease period, in seconds. 
+*/ +function LeaseResult(container, blob, id, time) { + if (container) { + this.container = container; + } + + if (blob) { + this.blob = blob; + } + + if (id) { + this.id = id; + } + + if (time) { + this.time = time; + } +} + +LeaseResult.prototype.getPropertiesFromHeaders = function (headers) { + var self = this; + + if (!self['id'] && headers[HeaderConstants.LEASE_ID]) { + self['id'] = headers[HeaderConstants.LEASE_ID]; + } + + if (!self['time'] && headers[HeaderConstants.LEASE_TIME]) { + self['time'] = parseInt(headers[HeaderConstants.LEASE_TIME], 10); + } + + self['etag'] = headers[HeaderConstants.ETAG]; + self['lastModified'] = headers[HeaderConstants.LAST_MODIFIED.toLowerCase()]; +}; + +module.exports = LeaseResult; + +/***/ }), + +/***/ 99087: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. 
+var qs = __nccwpck_require__(63477); +var url = __nccwpck_require__(57310); +var util = __nccwpck_require__(73837); +var _ = __nccwpck_require__(7404); +var extend = __nccwpck_require__(99237); +var path = __nccwpck_require__(71017); + +var azureCommon = __nccwpck_require__(82187); +var Md5Wrapper = __nccwpck_require__(11007); +var azureutil = azureCommon.util; +var SR = azureCommon.SR; +var validate = azureCommon.validate; +var SpeedSummary = azureCommon.SpeedSummary; +var StorageServiceClient = azureCommon.StorageServiceClient; +var WebResource = azureCommon.WebResource; + +// Constants +var Constants = azureCommon.Constants; +var FileConstants = Constants.FileConstants; +var HeaderConstants = Constants.HeaderConstants; +var HttpConstants = Constants.HttpConstants; +var QueryStringConstants = Constants.QueryStringConstants; + +// Streams +var BatchOperation = azureCommon.BatchOperation; +var SpeedSummary = azureCommon.SpeedSummary; +var ChunkAllocator = azureCommon.ChunkAllocator; +var ChunkStream = azureCommon.ChunkStream; +var ChunkStreamWithStream = azureCommon.ChunkStreamWithStream; +var FileRangeStream = __nccwpck_require__(56575); + +// Models requires +var ShareResult = __nccwpck_require__(12636); +var DirectoryResult = __nccwpck_require__(75667); +var FileResult = __nccwpck_require__(7253); +var AclResult = azureCommon.AclResult; + +// Errors requires +var errors = __nccwpck_require__(12528); +var ArgumentNullError = errors.ArgumentNullError; +var ArgumentError = errors.ArgumentError; + +/** +* Creates a new FileService object. +* If no connection string or storageaccount and storageaccesskey are provided, +* the AZURE_STORAGE_CONNECTION_STRING or AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY environment variables will be used. +* @class +* The FileService class is used to perform operations on the Microsoft Azure File Service. 
+* The File Service provides storage for binary large objects, and provides functions for working with data stored in files. +* +* For more information on the File Service, as well as task focused information on using it in a Node.js application, see +* [How to Use the File Service from Node.js](http://azure.microsoft.com/en-us/documentation/articles/storage-nodejs-how-to-use-file-storage/). +* The following defaults can be set on the file service. +* defaultTimeoutIntervalInMs The default timeout interval, in milliseconds, to use for request made via the file service. +* defaultEnableReuseSocket The default boolean value to enable socket reuse when uploading local files or streams. +* If the Node.js version is lower than 0.10.x, socket reuse will always be turned off. +* defaultClientRequestTimeoutInMs The default timeout of client requests, in milliseconds, to use for the request made via the file service. +* defaultMaximumExecutionTimeInMs The default maximum execution time across all potential retries, for requests made via the file service. +* defaultLocationMode The default location mode for requests made via the file service. +* parallelOperationThreadCount The number of parallel operations that may be performed when uploading a file. +* useNagleAlgorithm Determines whether the Nagle algorithm is used for requests made via the file service; true to use the +* Nagle algorithm; otherwise, false. The default value is false. +* enableGlobalHttpAgent Determines whether global HTTP(s) agent is enabled; true to use Global HTTP(s) agent; otherwise, false to use +* http(s).Agent({keepAlive:true}). +* @constructor +* @extends {StorageServiceClient} +* +* @param {string} [storageAccountOrConnectionString] The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {string|object} [host] The host address. To define primary only, pass a string. 
+* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {string} [sasToken] The Shared Access Signature token. +* @param {string} [endpointSuffix] The endpoint suffix. +*/ +function FileService(storageAccountOrConnectionString, storageAccessKey, host, sasToken, endpointSuffix) { + var storageServiceSettings = StorageServiceClient.getStorageSettings(storageAccountOrConnectionString, storageAccessKey, host, sasToken, endpointSuffix); + + FileService['super_'].call(this, + storageServiceSettings._name, + storageServiceSettings._key, + storageServiceSettings._fileEndpoint, + storageServiceSettings._usePathStyleUri, + storageServiceSettings._sasToken); + + this.defaultEnableReuseSocket = Constants.DEFAULT_ENABLE_REUSE_SOCKET; + this.singleFileThresholdInBytes = FileConstants.DEFAULT_SINGLE_FILE_GET_THRESHOLD_IN_BYTES; + this.parallelOperationThreadCount = Constants.DEFAULT_PARALLEL_OPERATION_THREAD_COUNT; +} + +util.inherits(FileService, StorageServiceClient); + +// Utility methods + +/** +* Create resource name +* @ignore +* +* @param {string} share Share name +* @param {string} [directory] Directory name +* @param {string} [file] File name +* @return {string} The encoded resource name. 
+*/ +function createResourceName(share, directory, file, forSAS) { + var encode = function(name) { + if (name && !forSAS) { + name = encodeURIComponent(name); + name = name.replace(/%2F/g, '/'); + name = name.replace(/%5C/g, '/'); + name = name.replace(/\+/g, '%20'); + } + return name; + }; + + var name = share; + + if (directory) { + // if directory does not start with '/', add it + if (directory[0] !== '/') { + name += ('/'); + } + + name += encode(directory); + } + + if (file) { + // if the current path does not end with '/', add it + if (name[name.length - 1] !== '/') { + name += ('/'); + } + + name += encode(file); + } + + return path.normalize(name).replace(/\\/g, '/'); +} + +// File service methods + +/** +* Gets the properties of a storage account's File service, including Azure Storage Analytics. +* +* @this {FileService} +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. 
+* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `[result]{@link ServiceProperties}` will contain the properties +* and `response` will contain information related to this operation. +*/ +FileService.prototype.getServiceProperties = function (optionsOrCallback, callback) { + return this.getAccountServiceProperties(optionsOrCallback, callback); +}; + +/** +* Sets the properties of a storage account's File service, including Azure Storage Analytics. +* You can also use this operation to set the default request version for all incoming requests that do not have a version specified. +* +* @this {FileService} +* @param {object} serviceProperties The service properties. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise, `response` +* will contain information related to this operation. +*/ +FileService.prototype.setServiceProperties = function (serviceProperties, optionsOrCallback, callback) { + return this.setAccountServiceProperties(serviceProperties, optionsOrCallback, callback); +}; + +// Share methods + +/** +* Lists a segment containing a collection of share items under the specified account. +* +* @this {FileService} +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.maxResults] Specifies the maximum number of shares to return per call to Azure storage. +* @param {string} [options.include] Include this parameter to specify that the share's metadata be returned as part of the response body. (allowed values: '', 'metadata', 'snapshots' or any combination of them) +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of `[shares]{@link ShareResult}` and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. +*/ +FileService.prototype.listSharesSegmented = function (currentToken, optionsOrCallback, callback) { + this.listSharesSegmentedWithPrefix(null /* prefix */, currentToken, optionsOrCallback, callback); +}; + +/** +* Lists a segment containing a collection of share items whose names begin with the specified prefix under the specified account. +* +* @this {FileService} +* @param {string} prefix The prefix of the share name. +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {string} [options.prefix] Filters the results to return only shares whose name begins with the specified prefix. +* @param {int} [options.maxResults] Specifies the maximum number of shares to return per call to Azure storage. +* @param {string} [options.include] Include this parameter to specify that the share's metadata be returned as part of the response body. 
(allowed values: '', 'metadata', 'snapshots' or any combination of them) +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of `[shares]{@link ShareResult}` and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.listSharesSegmentedWithPrefix = function (prefix, currentToken, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('listShares', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.get() + .withQueryOption(QueryStringConstants.COMP, 'list') + .withQueryOption(QueryStringConstants.MAX_RESULTS, options.maxResults) + .withQueryOption(QueryStringConstants.INCLUDE, options.include); + + if (!azureutil.objectIsNull(currentToken)) { + webResource.withQueryOption(QueryStringConstants.MARKER, currentToken.nextMarker); + } + + webResource.withQueryOption(QueryStringConstants.PREFIX, prefix); + + //options.requestLocationMode = azureutil.getNextListingLocationMode(currentToken); + + var processResponseCallback = function (responseObject, next) { + responseObject.listSharesResult = null; + + if (!responseObject.error) { + responseObject.listSharesResult = { + entries: null, + continuationToken: null + }; + responseObject.listSharesResult.entries = []; + + var shares = []; + + if (responseObject.response.body.EnumerationResults.Shares && responseObject.response.body.EnumerationResults.Shares.Share) { + shares = responseObject.response.body.EnumerationResults.Shares.Share; + if (!_.isArray(shares)) { + shares = [shares]; + } + } + + shares.forEach(function (currentShare) { + var shareResult = ShareResult.parse(currentShare); + responseObject.listSharesResult.entries.push(shareResult); + }); + + if (responseObject.response.body.EnumerationResults.NextMarker) { + responseObject.listSharesResult.continuationToken = { + nextMarker: null, + targetLocation: null + }; + + responseObject.listSharesResult.continuationToken.nextMarker = responseObject.response.body.EnumerationResults.NextMarker; + 
responseObject.listSharesResult.continuationToken.targetLocation = responseObject.targetLocation; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.listSharesResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Checks whether or not a share exists on the service. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ShareResult}` will contain +* the share information including `exists` boolean member. 
+* `response` will contain information related to this operation. +*/ +FileService.prototype.doesShareExist = function (share, optionsOrCallback, callback) { + this._doesShareExist(share, false, optionsOrCallback, callback); +}; + +/** +* Creates a new share under the specified account. +* If a share with the same name already exists, the operation fails. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {int} [options.quota] Specifies the maximum size of the share, in gigabytes. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ShareResult}` will contain +* the share information. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.createShare = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createShare', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.shareQuotaIsValid(userOptions.quota); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withHeader(HeaderConstants.SHARE_QUOTA, options.quota); + + webResource.addOptionalMetadataHeaders(options.metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = new ShareResult(share); + responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + + if (options.metadata) { + responseObject.shareResult.metadata = options.metadata; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Creates a share snapshot. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the ID of the snapshot. +* `response` will contain information related to this operation. +*/ +FileService.prototype.createShareSnapshot = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createShareSnapshot', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.COMP, QueryStringConstants.SNAPSHOT); + webResource.addOptionalMetadataHeaders(options.metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.snapshotId = null; + if (!responseObject.error) { + responseObject.snapshotId = responseObject.response.headers[HeaderConstants.SNAPSHOT]; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.snapshotId, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, 
null, options, processResponseCallback); +}; + +/** +* Creates a new share under the specified account if the share does not exists. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ShareResult}` will contain +* the share information including `created` boolean member. +* `response` will contain information related to this operation. 
+* +* @example +* var azure = require('azure-storage'); +* var FileService = azure.createFileService(); +* FileService.createShareIfNotExists('taskshare', function(error) { +* if(!error) { +* // Share created or already existed +* } +* }); +*/ +FileService.prototype.createShareIfNotExists = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createShareIfNotExists', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + delete options.shareSnapshotId; + + var self = this; + self._doesShareExist(share, true, options, function(error, result, response) { + var exists = result.exists; + result.created = false; + delete result.exists; + + if(error){ + callback(error, result, response); + } else if (exists) { + response.isSuccessful = true; + callback(error, result, response); + } else { + self.createShare(share, options, function (createError, responseShare, createResponse) { + if(!createError){ + responseShare.created = true; + } + else if (createError && createError.statusCode === HttpConstants.HttpResponseCodes.Conflict && createError.code === Constants.FileErrorCodeStrings.SHARE_ALREADY_EXISTS) { + // If it was created before, there was no actual error. + createError = null; + createResponse.isSuccessful = true; + } + + callback(createError, responseShare, createResponse); + }); + } + }); +}; + +/** +* Retrieves a share and its properties from a specified account. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. + + +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. 
+* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ShareResult}` will contain +* information for the share. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.getShareProperties = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getShareProperties', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.head(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + //options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = new ShareResult(share); + responseObject.shareResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Sets the properties for the specified share. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [properties] The share properties to set. +* @param {string|int} [properties.quota] Specifies the maximum size of the share, in gigabytes. +* @param {object} [options] The request options. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[share]{@link ShareResult}` will contain +* information about the share. +* `response` will contain information related to this operation. +*/ +FileService.prototype.setShareProperties = function (share, properties, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setShareProperties', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.shareQuotaIsValid(userOptions.quota); + v.callback(callback); + }); + + var options = extend(true, properties, userOptions); + var resourceName = createResourceName(share); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.COMP, 'properties') + .withHeader(HeaderConstants.SHARE_QUOTA, options.quota); + + FileResult.setProperties(webResource, options); + + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = new ShareResult(share); + 
responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Gets the share statistics for a share. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `[result]{@link ServiceStats}` will contain the stats and +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.getShareStats = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getShareStats', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share); + var webResource = WebResource.get(resourceName) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.COMP, 'stats'); + + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = ShareResult.parse(responseObject.response.body, share); + responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + // function to be called after all filters + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + // call the first filter + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + + +/** +* Returns all user-defined metadata for the share. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ShareResult}` will contain +* information for the share. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.getShareMetadata = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getShareMetadata', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.head(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.COMP, 'metadata') + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = new ShareResult(share); + responseObject.shareResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Sets the share's metadata. +* +* Calling the Set Share Metadata operation overwrites all existing metadata that is associated with the share. +* It's not possible to modify an individual name/value pair. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} metadata The metadata key/value pairs. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.setShareMetadata = function (share, metadata, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setShareMetadata', function (v) { + v.string(share, 'share'); + v.object(metadata, 'metadata'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.COMP, 'metadata'); + + webResource.addOptionalMetadataHeaders(metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = new ShareResult(share); + responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Gets the share's ACL. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ShareAclResult}` will contain +* information for the share. +* `response` will contain information related to this operation. +*/ +FileService.prototype.getShareAcl = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getShareAcl', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.get(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.COMP, 'acl'); + + options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = new ShareResult(share); + responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + responseObject.shareResult.signedIdentifiers = AclResult.parse(responseObject.response.body); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + 
this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Updates the share's ACL. +* +* @this {FileService} +* @param {string} share The share name. +* @param {Object.} signedIdentifiers The share ACL settings. See `[AccessPolicy]{@link AccessPolicy}` for detailed information. +* @param {object} [options] The request options. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link ShareAclResult}` will contain +* information for the share. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.setShareAcl = function (share, signedIdentifiers, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setShareAcl', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var policies = null; + if (signedIdentifiers) { + if(_.isArray(signedIdentifiers)) { + throw new TypeError(SR.INVALID_SIGNED_IDENTIFIERS); + } + policies = AclResult.serialize(signedIdentifiers); + } + + var webResource = WebResource.put(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.COMP, 'acl') + .withHeader(HeaderConstants.CONTENT_LENGTH, !azureutil.objectIsNull(policies) ? Buffer.byteLength(policies) : 0) + .withBody(policies); + + var processResponseCallback = function (responseObject, next) { + responseObject.shareResult = null; + if (!responseObject.error) { + responseObject.shareResult = new ShareResult(share); + responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + if (signedIdentifiers) { + responseObject.shareResult.signedIdentifiers = signedIdentifiers; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, webResource.body, options, processResponseCallback); +}; + +/** +* Marks the specified share for deletion. +* The share and any files contained within it are later deleted during garbage collection. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {string} [options.deleteSnapshots] The snapshot delete option. See azure.FileUtilities.ShareSnapshotDeleteOptions.*. 
+* @param {string} [options.shareSnapshotId] The share snapshot identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.deleteShare = function (share, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteShare', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + if (!azureutil.objectIsNull(options.shareSnapshotId) && !azureutil.objectIsNull(options.deleteSnapshots)) { + throw new ArgumentError('options', SR.INVALID_DELETE_SNAPSHOT_OPTION); + } + + var webResource = WebResource.del(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId) + .withHeader(HeaderConstants.DELETE_SNAPSHOT, options.deleteSnapshots); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Marks the specified share for deletion if it exists. +* The share and any files contained within it are later deleted during garbage collection. +* +* @this {FileService} +* @param {string} share The share name. +* @param {object} [options] The request options. +* @param {string} [options.deleteSnapshots] The snapshot delete option. See azure.FileUtilities.ShareSnapshotDeleteOptions.*. +* @param {string} [options.shareSnapshotId] The share snapshot identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will +* be true if the share exists and was deleted, or false if the share +* did not exist. +* `response` will contain information related to this operation. 
+*/
+FileService.prototype.deleteShareIfExists = function (share, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('deleteShareIfExists', function (v) {
+    v.string(share, 'share');
+    v.shareNameIsValid(share);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  var self = this;
+  self._doesShareExist(share, true, options, function (error, result, response) {
+    if(error){
+      callback(error, result.exists, response);
+    } else if (!result.exists) {
+      response.isSuccessful = true;
+      callback(error, false, response);
+    } else {
+      self.deleteShare(share, options, function (deleteError, deleteResponse) {
+        var deleted;
+        if (!deleteError){
+          deleted = true;
+        } else if (deleteError && deleteError.statusCode === HttpConstants.HttpResponseCodes.NotFound && deleteError.code === Constants.FileErrorCodeStrings.SHARE_NOT_FOUND) {
+          // If it was deleted already, there was no actual error.
+          deleted = false;
+          deleteError = null;
+          deleteResponse.isSuccessful = true;
+        }
+
+        callback(deleteError, deleted, deleteResponse);
+      });
+    }
+  });
+};
+
+// Directory methods
+
+/**
+* Checks whether or not a directory exists on the service.
+*
+* @this {FileService}
+* @param {string} share The share name.
+* @param {string} directory The directory name. Use '' to refer to the base directory.
+* @param {object} [options] The request options.
+* @param {string} [options.shareSnapshotId] The share snapshot identifier.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `[result]{@link DirectoryResult}` will contain
+* the directory information including `exists` boolean member.
+* `response` will contain information related to this operation.
+*/
+FileService.prototype.doesDirectoryExist = function (share, directory, optionsOrCallback, callback) {
+  // Thin wrapper over the internal existence check. The literal `false` third
+  // argument contrasts with the `true` passed by the *IfExists helpers;
+  // NOTE(review): presumably a primary-only/retry flag — confirm against
+  // _doesDirectoryExist's signature.
+  this._doesDirectoryExist(share, directory, false, optionsOrCallback, callback);
+};
+
+/**
+* Creates a new directory under the specified account.
+* If a directory with the same name already exists, the operation fails.
+*
+* @this {FileService}
+* @param {string} share The share name.
+* @param {string} directory The directory name.
+* @param {object} [options] The request options.
+* @param {object} [options.metadata] The metadata key/value pairs.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+*/
+FileService.prototype.createDirectory = function (share, directory, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('createDirectory', function (v) {
+    v.string(share, 'share');
+    v.string(directory, 'directory');
+    v.shareNameIsValid(share);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  // PUT <share>/<directory>?restype=directory creates the directory resource;
+  // optional metadata travels as x-ms-meta-* request headers.
+  var webResource = WebResource.put(createResourceName(share, directory))
+    .withQueryOption(QueryStringConstants.RESTYPE, 'directory');
+
+  webResource.addOptionalMetadataHeaders(options.metadata);
+
+  var processResponseCallback = function (responseObject, next) {
+    responseObject.directoryResult = null;
+    if (!responseObject.error) {
+      // On success, build the result from the response headers only (the
+      // create operation returns no body).
+      responseObject.directoryResult = new DirectoryResult(directory);
+      responseObject.directoryResult.getPropertiesFromHeaders(responseObject.response.headers);
+    }
+
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.directoryResult, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, null, options, processResponseCallback);
+};
+
+/**
+* Creates a new directory under the specified account if the directory does not exist.
+*
+* @this {FileService}
+* @param {string} share The share name.
+* @param {string} directory The directory name.
+* @param {object} [options] The request options.
+* @param {object} [options.metadata] The metadata key/value pairs.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `[result]{@link DirectoryResult}` will contain
+* the directory information including `created` boolean member
+* (`created` is false when the directory already exists).
+* `response` will contain information related to this operation.
+*
+* @example
+* var azure = require('azure-storage');
+* var FileService = azure.createFileService();
+* FileService.createDirectoryIfNotExists('taskshare', 'taskdirectory', function(error) {
+* if(!error) {
+* // Directory created or already existed
+* }
+* });
+*/
+FileService.prototype.createDirectoryIfNotExists = function (share, directory, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('createDirectoryIfNotExists', function (v) {
+    v.string(share, 'share');
+    v.string(directory, 'directory');
+    v.shareNameIsValid(share);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  // NOTE(review): snapshot ids are dropped here, presumably so that both the
+  // existence probe and the create target the live (writable) share rather
+  // than a read-only snapshot — confirm against _doesDirectoryExist.
+  delete options.shareSnapshotId;
+
+  var self = this;
+  self._doesDirectoryExist(share, directory, true, options, function(error, result, response) {
+    // Reshape the existence result into a `created` result for the caller.
+    var exists = result.exists;
+    result.created = false;
+    delete result.exists;
+
+    if(error){
+      callback(error, result, response);
+    } else if (exists) {
+      response.isSuccessful = true;
+      callback(error, result, response);
+    } else {
+      self.createDirectory(share, directory, options, function (createError, responseDirectory, createResponse) {
+        if(!createError){
+          responseDirectory.created = true;
+        }
+        else if (createError && createError.statusCode === HttpConstants.HttpResponseCodes.Conflict && createError.code === Constants.StorageErrorCodeStrings.RESOURCE_ALREADY_EXISTS) {
+          // If it was created before, there was no actual error.
+          createError = null;
+          createResponse.isSuccessful = true;
+        }
+
+        callback(createError, responseDirectory, createResponse);
+      });
+    }
+  });
+};
+
+/**
+* Retrieves a directory and its properties from a specified account.
+* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive.
+*
+* @this {FileService}
+* @param {string} share The share name.
+* @param {string} directory The directory name. Use '' to refer to the base directory.
+* @param {object} [options] The request options.
+* @param {string} [options.shareSnapshotId] The snapshot identifier of the share.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `[result]{@link DirectoryResult}` will contain
+* information for the directory.
+* `response` will contain information related to this operation.
+*/
+FileService.prototype.getDirectoryProperties = function (share, directory, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('getDirectoryProperties', function (v) {
+    v.string(share, 'share');
+    v.stringAllowEmpty(directory, 'directory');
+    v.shareNameIsValid(share);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  // HEAD request: there is no response body, so both properties and metadata
+  // are extracted from the response headers below.
+  var webResource = WebResource.head(createResourceName(share, directory))
+    .withQueryOption(QueryStringConstants.RESTYPE, 'directory')
+    .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId);
+
+  var self = this;
+  var processResponseCallback = function (responseObject, next) {
+    responseObject.directoryResult = null;
+    if (!responseObject.error) {
+      responseObject.directoryResult = new DirectoryResult(directory);
+      responseObject.directoryResult.metadata = self.parseMetadataHeaders(responseObject.response.headers);
+      responseObject.directoryResult.getPropertiesFromHeaders(responseObject.response.headers);
+    }
+
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.directoryResult, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, null, options, processResponseCallback);
+};
+
+/**
+* Marks the specified directory for deletion. The directory must be empty before it can be deleted.
+*
+* @this {FileService}
+* @param {string} share The share name.
+* @param {string} directory The directory name.
+* @param {object} [options] The request options.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResponse} callback `error` will contain information
+* if an error occurs; otherwise
+* `response` will contain information related to this operation.
+*/
+FileService.prototype.deleteDirectory = function (share, directory, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('deleteDirectory', function (v) {
+    v.string(share, 'share');
+    v.string(directory, 'directory');
+    v.shareNameIsValid(share);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  // DELETE <share>/<directory>?restype=directory — no result object is
+  // produced; the callback receives only (error, response).
+  var webResource = WebResource.del(createResourceName(share, directory))
+    .withQueryOption(QueryStringConstants.RESTYPE, 'directory');
+
+  var processResponseCallback = function (responseObject, next) {
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, null, options, processResponseCallback);
+};
+
+/**
+* Marks the specified directory for deletion if it exists. The directory must be empty before it can be deleted.
+*
+* @this {FileService}
+* @param {string} share The share name.
+* @param {string} directory The directory name.
+* @param {object} [options] The request options.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `result` will
+* be true if the directory exists and was deleted, or false if the directory
+* did not exist.
+* `response` will contain information related to this operation.
+*/ +FileService.prototype.deleteDirectoryIfExists = function (share, directory, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteDirectoryIfExists', function (v) { + v.string(share, 'share'); + v.string(directory, 'directory'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + delete options.shareSnapshotId; + + var self = this; + self._doesDirectoryExist(share, directory, true, options, function(error, result, response) { + if(error){ + callback(error, result.exists, response); + } else if (!result.exists) { + response.isSuccessful = true; + callback(error, false, response); + } else { + self.deleteDirectory(share, directory, options, function (deleteError, deleteResponse) { + var deleted; + if (!deleteError){ + deleted = true; + } else if (deleteError && deleteError.statuscode === HttpConstants.HttpResponseCodes.NotFound && deleteError.code === Constants.StorageErrorCodeStrings.RESOURCE_NOT_FOUND) { + // If it was deleted already, there was no actual error. + deleted = false; + deleteError = null; + deleteResponse.isSuccessful = true; + } + + callback(deleteError, deleted, deleteResponse); + }); + } + }); +}; + +/** +* Lists a segment containing a collection of file items in the directory. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {int} [options.maxResults] Specifies the maximum number of files to return per call to Azure ServiceClient. 
This does NOT affect list size returned by this function. (maximum: 5000) +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* entries.files which contains a list of `[files]{@link FileResult}`, entries.directories which contains a list of `[directories]{@link DirectoryResult}` and the continuationToken for the next listing operation. +* `response` will contain information related to this operation. +*/ +FileService.prototype.listFilesAndDirectoriesSegmented = function (share, directory, currentToken, optionsOrCallback, callback) { + this.listFilesAndDirectoriesSegmentedWithPrefix(share, directory, null /*prefix*/, currentToken, optionsOrCallback, callback); +}; + + +/** +* Lists a segment containing a collection of file items in the directory. 
+*
+* @this {FileService}
+* @param {string} share The share name.
+* @param {string} directory The directory name. Use '' to refer to the base directory.
+* @param {string} prefix The prefix of the directory/files name.
+* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation.
+* @param {object} [options] The request options.
+* @param {string} [options.shareSnapshotId] The snapshot identifier of the share.
+* @param {int} [options.maxResults] Specifies the maximum number of files to return per call to Azure ServiceClient. This does NOT affect list size returned by this function. (maximum: 5000)
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `result` will contain
+* entries.files which contains a list of `[files]{@link FileResult}`, entries.directories which contains a list of `[directories]{@link DirectoryResult}` and the continuationToken for the next listing operation.
+* `response` will contain information related to this operation.
+*/
+FileService.prototype.listFilesAndDirectoriesSegmentedWithPrefix = function (share, directory, prefix, currentToken, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  // NOTE(review): validation label says 'listFilesSegmented', not this
+  // function's name — error messages from validateArgs will carry the old
+  // name. Harmless but confusing; confirm before renaming (the label is a
+  // runtime string).
+  validate.validateArgs('listFilesSegmented', function (v) {
+    v.string(share, 'share');
+    v.stringAllowEmpty(directory, 'directory');
+    v.shareNameIsValid(share);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  // GET <share>/<directory>?restype=directory&comp=list with paging
+  // (marker/maxresults) and optional prefix/snapshot query options.
+  var webResource = WebResource.get(createResourceName(share, directory))
+    .withQueryOption(QueryStringConstants.RESTYPE, 'directory')
+    .withQueryOption(QueryStringConstants.COMP, 'list')
+    .withQueryOption(QueryStringConstants.MAX_RESULTS, options.maxResults)
+    .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId);
+
+  if (!azureutil.objectIsNull(currentToken)) {
+    webResource.withQueryOption(QueryStringConstants.MARKER, currentToken.nextMarker);
+  }
+
+  webResource.withQueryOption(QueryStringConstants.PREFIX, prefix);
+
+  var processResponseCallback = function (responseObject, next) {
+    responseObject.listResult = null;
+    if (!responseObject.error) {
+      responseObject.listResult = {
+        entries: null,
+        continuationToken: null
+      };
+
+      responseObject.listResult.entries = {};
+      responseObject.listResult.entries.files = [];
+      responseObject.listResult.entries.directories = [];
+      var files = [];
+      var directories = [];
+
+      // parse files
+      if (responseObject.response.body.EnumerationResults.Entries.File) {
+        files = responseObject.response.body.EnumerationResults.Entries.File;
+        // A single entry presumably deserializes from XML as a bare object
+        // rather than a one-element array — normalize to an array.
+        if (!_.isArray(files)) {
+          files = [ files ];
+        }
+      }
+
+      files.forEach(function (currentFile) {
+        var fileResult = FileResult.parse(currentFile);
+        responseObject.listResult.entries.files.push(fileResult);
+      });
+
+      // parse directories
+      if (responseObject.response.body.EnumerationResults.Entries.Directory) {
+        directories = responseObject.response.body.EnumerationResults.Entries.Directory;
+        if (!_.isArray(directories)) {
+          directories = [ directories ];
+        }
+      }
+
+      directories.forEach(function (currentDirectory) {
+        var directoryResult = DirectoryResult.parse(currentDirectory);
+        responseObject.listResult.entries.directories.push(directoryResult);
+      });
+
+      // parse continuation token
+      if(responseObject.response.body.EnumerationResults.NextMarker) {
+        responseObject.listResult.continuationToken = {
+          nextMarker: null,
+          targetLocation: null
+        };
+
+        responseObject.listResult.continuationToken.nextMarker = responseObject.response.body.EnumerationResults.NextMarker;
+        responseObject.listResult.continuationToken.targetLocation = responseObject.targetLocation;
+      }
+    }
+
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.listResult, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, null, options, processResponseCallback);
+};
+
+/**
+* Returns all user-defined metadata for the specified directory.
+* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive.
+*
+* @this {FileService}
+* @param {string} share The share name.
+* @param {string} directory The directory name. Use '' to refer to the base directory.
+* @param {object} [options] The request options.
+* @param {string} [options.shareSnapshotId] The snapshot identifier of the share.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `[result]{@link DirectoryResult}` will contain
+* information about the directory.
+* `response` will contain information related to this operation.
+*/
+FileService.prototype.getDirectoryMetadata = function (share, directory, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('getDirectoryMetadata', function (v) {
+    v.string(share, 'share');
+    v.stringAllowEmpty(directory, 'directory');
+    v.shareNameIsValid(share);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  var resourceName = createResourceName(share, directory);
+  // HEAD ...?restype=directory&comp=metadata — metadata arrives as
+  // x-ms-meta-* response headers, parsed below.
+  var webResource = WebResource.head(resourceName)
+    .withQueryOption(QueryStringConstants.RESTYPE, 'directory')
+    .withQueryOption(QueryStringConstants.COMP, 'metadata')
+    .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId);
+
+  var self = this;
+  var processResponseCallback = function (responseObject, next) {
+    responseObject.directoryResult = null;
+    if (!responseObject.error) {
+      responseObject.directoryResult = new DirectoryResult(directory);
+      responseObject.directoryResult.metadata = self.parseMetadataHeaders(responseObject.response.headers);
+      responseObject.directoryResult.getPropertiesFromHeaders(responseObject.response.headers);
+    }
+
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.directoryResult, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, null, options, processResponseCallback);
+};
+
+/**
+* Sets user-defined metadata for the specified directory as one or more name-value pairs
+*
+* @this {FileService}
+* @param {string} share The share name.
+* @param {string} directory The directory name. Use '' to refer to the base directory.
+* @param {object} metadata The metadata key/value pairs.
+* @param {object} [options] The request options.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information
+* if an error occurs; otherwise `[result]{@link DirectoryResult}` will contain
+* information on the directory.
+* `response` will contain information related to this operation.
+*/
+FileService.prototype.setDirectoryMetadata = function (share, directory, metadata, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('setDirectoryMetadata', function (v) {
+    v.string(share, 'share');
+    v.stringAllowEmpty(directory, 'directory');
+    v.shareNameIsValid(share);
+    v.object(metadata, 'metadata');
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  var resourceName = createResourceName(share, directory);
+  // PUT ...?restype=directory&comp=metadata with the metadata map serialized
+  // as x-ms-meta-* request headers. This replaces the resource's metadata.
+  var webResource = WebResource.put(resourceName)
+    .withQueryOption(QueryStringConstants.RESTYPE, 'directory')
+    .withQueryOption(QueryStringConstants.COMP, 'metadata');
+
+  webResource.addOptionalMetadataHeaders(metadata);
+
+  var processResponseCallback = function (responseObject, next) {
+    responseObject.directoryResult = null;
+    if (!responseObject.error) {
+      responseObject.directoryResult = new DirectoryResult(directory);
+      responseObject.directoryResult.getPropertiesFromHeaders(responseObject.response.headers);
+    }
+
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.directoryResult, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, null, options, processResponseCallback);
+};
+
+// File methods
+
+/**
+* Retrieves a shared access signature token.
+*
+* @this {FileService}
+* @param {string} share The share name.
+* @param {string} [directory] The directory name. Use '' to refer to the base directory.
+* @param {string} [file] The file name.
+* @param {object} sharedAccessPolicy The shared access policy.
+* @param {string} [sharedAccessPolicy.Id] The signed identifier.
+* @param {object} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type.
+* @param {date|string} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid (The UTC value will be used).
+* @param {date|string} [sharedAccessPolicy.AccessPolicy.Expiry] The time at which the Shared Access Signature becomes expired (The UTC value will be used).
+* @param {string} [sharedAccessPolicy.AccessPolicy.IPAddressOrRange] An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive.
+* @param {string} [sharedAccessPolicy.AccessPolicy.Protocols] The protocols permitted for a request made with the account SAS.
+* Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http.
+* @param {object} [headers] The optional header values to set for a file returned with this SAS.
+* @param {string} [headers.cacheControl] The optional value of the Cache-Control response header to be returned when this SAS is used.
+* @param {string} [headers.contentType] The optional value of the Content-Type response header to be returned when this SAS is used.
+* @param {string} [headers.contentEncoding] The optional value of the Content-Encoding response header to be returned when this SAS is used.
+* @param {string} [headers.contentLanguage] The optional value of the Content-Language response header to be returned when this SAS is used.
+* @param {string} [headers.contentDisposition] The optional value of the Content-Disposition response header to be returned when this SAS is used.
+* @return {string} The shared access signature query string. Note this string does not contain the leading "?".
+*/
+FileService.prototype.generateSharedAccessSignature = function (share, directory, file, sharedAccessPolicy, headers) {
+  // check if the FileService is able to generate a shared access signature
+  if (!this.storageCredentials || !this.storageCredentials.generateSignedQueryString) {
+    throw new Error(SR.CANNOT_CREATE_SAS_WITHOUT_ACCOUNT_KEY);
+  }
+
+  // Validate share name. File name is optional.
+  validate.validateArgs('generateSharedAccessSignature', function (v) {
+    v.string(share, 'share');
+    v.shareNameIsValid(share);
+    v.object(sharedAccessPolicy, 'sharedAccessPolicy');
+  });
+
+  var resourceType = FileConstants.ResourceTypes.SHARE;
+  if (file) {
+    validate.validateArgs('generateSharedAccessSignature', function (v) {
+      v.stringAllowEmpty(directory, 'directory');
+      v.string(file, 'file');
+    });
+    resourceType = FileConstants.ResourceTypes.FILE;
+  } else {
+    directory = ''; // If file is not set, directory is not a part of the string to sign.
+  }
+
+  // NOTE(review): the conversions below write the normalized ISO-8601 strings
+  // back into the caller's sharedAccessPolicy object (in-place mutation), so
+  // a Date passed in will be a string after this call.
+  if (sharedAccessPolicy.AccessPolicy) {
+    if (!azureutil.objectIsNull(sharedAccessPolicy.AccessPolicy.Start)) {
+      if (!_.isDate(sharedAccessPolicy.AccessPolicy.Start)) {
+        sharedAccessPolicy.AccessPolicy.Start = new Date(sharedAccessPolicy.AccessPolicy.Start);
+      }
+
+      sharedAccessPolicy.AccessPolicy.Start = azureutil.truncatedISO8061Date(sharedAccessPolicy.AccessPolicy.Start);
+    }
+
+    if (!azureutil.objectIsNull(sharedAccessPolicy.AccessPolicy.Expiry)) {
+      if (!_.isDate(sharedAccessPolicy.AccessPolicy.Expiry)) {
+        sharedAccessPolicy.AccessPolicy.Expiry = new Date(sharedAccessPolicy.AccessPolicy.Expiry);
+      }
+
+      sharedAccessPolicy.AccessPolicy.Expiry = azureutil.truncatedISO8061Date(sharedAccessPolicy.AccessPolicy.Expiry);
+    }
+  }
+
+  var resourceName = createResourceName(share, directory, file, true);
+  return this.storageCredentials.generateSignedQueryString(Constants.ServiceType.File, resourceName, sharedAccessPolicy, null, { headers: headers, resourceType: resourceType });
+};
+
+/**
+* Retrieves a file or directory URL.
+*
+* @param {string} share The share name.
+* @param {string} directory The directory name. Use '' to refer to the base directory.
+* @param {string} [file] The file name. File names may not start or end with the delimiter '/'.
+* @param {string} [sasToken] The Shared Access Signature token.
+* @param {boolean} [primary] A boolean representing whether to use the primary or the secondary endpoint.
+* @param {string} [shareSnapshotId] The snapshot identifier of the share.
+* @return {string} The formatted URL string.
+* @example
+* var azure = require('azure-storage');
+* var fileService = azure.createFileService();
+* var sharedAccessPolicy = {
+* AccessPolicy: {
+* Permissions: azure.FileUtilities.SharedAccessPermissions.READ,
+* Start: startDate,
+* Expiry: expiryDate
+* },
+* };
+*
+* var sasToken = fileService.generateSharedAccessSignature(shareName, directoryName, fileName, sharedAccessPolicy);
+* var url = fileService.getUrl(shareName, directoryName, fileName, sasToken, true);
+*/
+FileService.prototype.getUrl = function (share, directory, file, sasToken, primary, shareSnapshotId) {
+  validate.validateArgs('getUrl', function (v) {
+    v.string(share, 'share');
+    v.stringAllowEmpty(directory, 'directory');
+    v.shareNameIsValid(share);
+  });
+
+  // Secondary endpoint is used only when `primary` is explicitly false.
+  var host;
+  if(!azureutil.objectIsNull(primary) && primary === false) {
+    host = this.host.secondaryHost;
+  } else {
+    host = this.host.primaryHost;
+  }
+  // Strip any explicit port and ensure a trailing slash before resolving.
+  host = azureutil.trimPortFromUri(host);
+  if(host && host.lastIndexOf('/') !== (host.length - 1)){
+    host = host + '/';
+  }
+
+  var name = createResourceName(share, directory, file);
+  var query = qs.parse(sasToken);
+  if(shareSnapshotId) {
+    query[QueryStringConstants.SHARE_SNAPSHOT] = shareSnapshotId;
+  }
+  return url.resolve(host, url.format({pathname: this._getPath(name), query: query}));
+};
+
+/**
+* Returns all user-defined metadata, standard HTTP properties, and system properties for the file.
+* It does not return or modify the content of the file. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* information about the file. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.getFileProperties = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getFileProperties', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.head(resourceName) + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers, true); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Returns all user-defined metadata for the specified file. +* It does not modify or return the content of the file. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. 
+* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* information about the file. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.getFileMetadata = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getFileMetadata', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.head(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'metadata') + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Sets user-defined properties for the specified file. +* It does not modify or return the content of the file. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [properties] The file properties to set. +* @param {string} [properties.contentType] The MIME content type of the file. The default type is application/octet-stream. 
+* @param {string} [properties.contentEncoding] The content encodings that have been applied to the file. +* @param {string} [properties.contentLanguage] The natural languages used by this resource. +* @param {string} [properties.cacheControl] The file's cache control. +* @param {string} [properties.contentDisposition] The file's content disposition. +* @param {string} [properties.contentLength] Resizes a file to the specified size. If the specified byte value is less than the current size of the file, +* then all ranges above the specified byte value are cleared. +* @param {string} [properties.contentMD5] The file's MD5 hash. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* information about the file. 
+* `response` will contain information related to this operation. +*/ +FileService.prototype.setFileProperties = function (share, directory, file, properties, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setFileProperties', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {contentSettings: properties, contentLength: properties.contentLength }, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'properties'); + + FileResult.setProperties(webResource, options); + + var processResponseCallback = function(responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function(returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Sets user-defined metadata for the specified file as one or more name-value pairs +* It does not modify or return the content of the file. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} metadata The metadata key/value pairs. +* @param {object} [options] The request options. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* information on the file. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.setFileMetadata = function (share, directory, file, metadata, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setFileMetadata', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.object(metadata, 'metadata'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'metadata'); + + webResource.addOptionalMetadataHeaders(metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Resizes a file. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {String} size The size of the file, in bytes. +* @param {object} [options] The request options. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* information about the file. +* `response` will contain information related to this operation. +*/ +FileService.prototype.resizeFile = function (share, directory, file, size, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('resizeFile', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.value(size); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'properties'); + + webResource.withHeader(HeaderConstants.FILE_CONTENT_LENGTH, size); + + var processResponseCallback = function(responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = 
function(returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Checks whether or not a file exists on the service. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {Function(error, result, response)} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* the file information including the `exists` boolean member. +* `response` will contain information related to this operation. +*/ +FileService.prototype.doesFileExist = function (share, directory, file, optionsOrCallback, callback) { + this._doesFileExist(share, directory, file, false, optionsOrCallback, callback); +}; + +/** +* Creates a file of the specified length. If the file already exists on the service, it will be overwritten. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {int} length The length of the file in bytes. +* @param {object} [options] The request options. +* @param {object} [options.contentSettings] The file's content settings. +* @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The file's content disposition. +* @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* the file information. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.createFile = function (share, directory, file, length, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + validate.validateArgs('createFile', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.value(length); + v.callback(callback); + }); + + var resourceName = createResourceName(share, directory, file); + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(resourceName) + .withHeader(HeaderConstants.TYPE, 'file') + .withHeader(HeaderConstants.FILE_CONTENT_LENGTH, length); + + FileResult.setProperties(webResource, options); + + var processResponseCallback = function(responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function(returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Marks the specified file for deletion. The file is later deleted during garbage collection. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; `response` will contain information related to this operation. 
+*/ +FileService.prototype.deleteFile = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteFile', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.del(resourceName); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Marks the specified file for deletion if it exists. The file is later deleted during garbage collection. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will +* be true if the file was deleted, or false if the file +* does not exist. +* `response` will contain information related to this operation. +*/ +FileService.prototype.deleteFileIfExists = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteFileIfExists', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + delete options.shareSnapshotId; + + var self = this; + self._doesFileExist(share, directory, file, true, options, function(error, result, response) { + if(error){ + callback(error, result.exists, response); + } else if (!result.exists) { + response.isSuccessful = true; + callback(error, false, response); + } else { + self.deleteFile(share, directory, file, options, function (deleteError, deleteResponse) { + var deleted; + if (!deleteError){ + deleted = true; + } else if (deleteError && deleteError.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound && deleteError.code === Constants.FileErrorCodeStrings.FILE_NOT_FOUND) { + // If it was deleted already, there was no actual error. 
+ deleted = false; + deleteError = null; + deleteResponse.isSuccessful = true; + } + + callback(deleteError, deleted, deleteResponse); + }); + } + }); +}; + +/** +* Downloads a file into a text string. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {int} [options.rangeStart] The range start. +* @param {int} [options.rangeEnd] The range end. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {FileService~FileToText} callback `error` will contain information +* if an error occurs; otherwise `text` will contain the file contents, +* and `[file]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. +*/ +FileService.prototype.getFileToText = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getFileToText', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.get(resourceName) + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId) + .withRawResponse(); + + FileResult.setHeaders(webResource, options); + this._setRangeContentMD5Header(webResource, options); + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.text = null; + responseObject.fileResult = null; + + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers, true); + responseObject.fileResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.text = responseObject.response.body; + + self._validateLengthAndMD5(options, responseObject); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.text, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Provides a stream to read from a 
file. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {string} [options.rangeStart] Return only the bytes of the file in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the file in the specified range. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. +* @return {Readable} A Node.js Readable stream. +* @example +* var azure = require('azure-storage'); +* var fileService = azure.createFileService(); +* var writable = fs.createWriteStream(destinationFileNameTarget); +* fileService.createReadStream(shareName, directoryName, fileName).pipe(writable); +*/ +FileService.prototype.createReadStream = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createReadStream', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + }); + + var options = extend(true, {}, userOptions); + + var readStream = new ChunkStream(options); + this.getFileToStream(share, directory, file, readStream, options, function (error, fileResponse, response) { + if(error) { + readStream.emit('error', error); + } + + if(callback) { + callback(error, fileResponse, response); + } + }); + + return readStream; +}; + +/** +* Downloads a file into a stream. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {Writable} writeStream The Node.js Writable stream. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {boolean} [options.skipSizeCheck] Skip the size check to perform direct download. +* Set the option to true for small files. 
+* Parallel download and speed summary won't work with this option on. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {string} [options.rangeStart] Return only the bytes of the file in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the file in the specified range. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +* +* +* @example +* var azure = require('azure-storage'); +* var FileService = azure.createFileService(); +* FileService.getFileToStream('taskshare', taskdirectory', 'task1', fs.createWriteStream('task1-download.txt'), function(error, serverFile) { +* if(!error) { +* // file available in serverFile.file variable +* } +* }); +*/ +FileService.prototype.getFileToStream = function (share, directory, file, writeStream, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + userOptions.speedSummary = userOptions.speedSummary || new SpeedSummary(file); + + validate.validateArgs('getFileToStream', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.object(writeStream, 'writeStream'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var propertiesRequestOptions = { + timeoutIntervalInMs : options.timeoutIntervalInMs, + clientRequestTimeoutInMs : options.clientRequestTimeoutInMs, + accessConditions : options.accessConditions, + shareSnapshotId : options.shareSnapshotId + }; + + if (options.skipSizeCheck) { + this._getFileToStream(share, directory, file, writeStream, options, callback); + } else { + var self = this; + this.getFileProperties(share, directory, file, propertiesRequestOptions, function (error, properties) { + if (error) { + callback(error); + } else { + var size; + if (options.rangeStart) { + var endOffset = properties.contentLength - 1; + var end = options.rangeEnd ? 
Math.min(options.rangeEnd, endOffset) : endOffset; + size = end - options.rangeStart + 1; + } else { + size = properties.contentLength; + } + options.speedSummary.totalSize = size; + + if (size > self.singleFileThresholdInBytes) { + azureutil.setObjectInnerPropertyValue(options, ['contentSettings', 'contentMD5'], azureutil.tryGetValueChain(properties, ['contentSettings', 'contentMD5'], null)); + self._getFileToRangeStream(share, directory, file, writeStream, options, callback); + } else { + self._getFileToStream(share, directory, file, writeStream, options, callback); + } + } + }); + } + + return options.speedSummary; +}; + +/** +* Lists file ranges. Lists all of the ranges by default, or only the ranges over a specific range of bytes if rangeStart and rangeEnd are specified. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {int} [options.rangeStart] The range start. +* @param {int} [options.rangeEnd] The range end. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* the range information. +* `response` will contain information related to this operation. +*/ +FileService.prototype.listRanges = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('listRanges', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var resourceName = createResourceName(share, directory, file); + var options = extend(true, {}, userOptions); + var webResource = WebResource.get(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'rangelist') + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + FileResult.setHeaders(webResource, options); + + var processResponseCallback = function (responseObject, next) { + responseObject.ranges = null; + if (!responseObject.error) { + responseObject.ranges = []; + + var ranges = []; + if (responseObject.response.body.Ranges.Range) { + ranges = responseObject.response.body.Ranges.Range; + + if (!_.isArray(ranges)) { + ranges = [ ranges ]; + } + } + + ranges.forEach(function (fileRange) { + var range = { + start: parseInt(fileRange.Start, 10), + end: parseInt(fileRange.End, 10) + }; + + responseObject.ranges.push(range); + }); + } + + var finalCallback 
= function (returnObject) { + callback(returnObject.error, returnObject.ranges, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Clears a range. Clears all of the ranges by default, or only the ranges over a specific range of bytes if rangeStart and rangeEnd are specified. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {int} rangeStart The range start. +* @param {int} rangeEnd The range end. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. 
+* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* the directory information. +* `response` will contain information related to this operation. +*/ +FileService.prototype.clearRange = function (share, directory, file, rangeStart, rangeEnd, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('clearRange', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.value(rangeStart); + v.value(rangeEnd); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var request = this._updateFilesImpl(share, directory, file, rangeStart, rangeEnd, FileConstants.RangeWriteOptions.CLEAR, options); + + var processResponseCallback = function(responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function(returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(request, null, options, processResponseCallback); +}; + +/** +* Updates a range from a stream. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {Readable} readStream The Node.js Readable stream. +* @param {int} rangeStart The range start. +* @param {int} rangeEnd The range end. +* @param {object} [options] The request options. 
+* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {string} [options.transactionalContentMD5] An optional hash value used to ensure transactional integrity for the page. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link FileResult}` will contain +* the file information. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.createRangesFromStream = function (share, directory, file, readStream, rangeStart, rangeEnd, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createRangesFromStream', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.object(readStream, 'readStream'); + v.shareNameIsValid(share); + v.value(rangeStart); + v.value(rangeEnd); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var requiresContentMD5 = azureutil.objectIsNull(options.transactionalContentMD5) && options.useTransactionalMD5 === true; + + var length = (rangeEnd - rangeStart) + 1; + if(length > FileConstants.MAX_UPDATE_FILE_SIZE) { + throw new Error(SR.INVALID_FILE_RANGE_FOR_UPDATE); + } + + var self = this; + if (requiresContentMD5) { + azureutil.calculateMD5(readStream, length, options, function(internalBuff, contentMD5) { + options.transactionalContentMD5 = contentMD5; + self._createRanges(share, directory, file, internalBuff, null /* stream */, rangeStart, rangeEnd, options, callback); + }); + } else { + self._createRanges(share, directory, file, null /* text */, readStream, rangeStart, rangeEnd, options, callback); + } +}; + +/** +* Uploads a file from a text string. If the file already exists on the service, it will be overwritten. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {string|object} text The file text, as a string or in a Buffer. +* @param {object} [options] The request options. 
+* @param {SpeedSummary} [options.speedSummary] The upload tracker objects; +* @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. +* The default value is false for files. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The file's content settings. +* @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The file's content disposition. +* @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {FileService~FileToText} callback `error` will contain information +* if an error occurs; otherwise `text` will contain the file contents, +* and `[file]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. +* @example +* var azure = require('azure-storage'); +* var fileService = azure.createFileService(); +* +* var text = 'Hello World!'; +* +* fileService.createFileFromText('taskshare', 'taskdirectory', 'taskfile', text, function(error, result, response) { +* if (!error) { +* // file created +* } +* }); +*/ +FileService.prototype.createFileFromText = function (share, directory, file, text, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createFileFromText', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var length = azureutil.objectIsNull(text) ? 0 : ((Buffer.isBuffer(text) ? 
text.length : Buffer.byteLength(text))); + if (length > FileConstants.MAX_UPDATE_FILE_SIZE) { + throw new Error(SR.INVALID_FILE_LENGTH); + } + + if(options.storeFileContentMD5 && azureutil.objectIsNull(azureutil.tryGetValueChain(options, ['contentSettings', 'contentMD5'], null))) { + azureutil.setObjectInnerPropertyValue(options, ['contentSettings', 'contentMD5'], azureutil.getContentMd5(text)); + } + + var self = this; + this.createFile(share, directory, file, length, options, function(error, fileResult, response) { + if(error || length === 0) { + callback(error, fileResult, response); + } + else { + self._createRanges(share, directory, file, text, null, 0, length - 1, options, callback); + } + }); +}; + +/** +* Uploads a file from a stream. If the file already exists on the service, it will be overwritten. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param (Stream) stream Stream to the data to store. +* @param {int} streamLength The length of the stream to upload. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects; +* @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. +* The default value is false for files. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The file's content settings. +* @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. 
+* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The file's content disposition. +* @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. 
+* @return {SpeedSummary} +* @example +* var stream = require('stream'); +* var azure = require('azure-storage'); +* var fileService = azure.createFileService(); +* +* var fileStream = new stream.Readable(); +* fileStream.push(myFileBuffer); +* fileStream.push(null); +* +* fileService.createFileFromStream('taskshare', 'taskdirectory', 'taskfile', fileStream, myFileBuffer.length, function(error, result, response) { +* if (!error) { +* // file uploaded +* } +* }); +*/ +FileService.prototype.createFileFromStream = function(share, directory, file, stream, streamLength, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createFileFromStream', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.object(stream, 'stream'); + v.value(streamLength, 'streamLength'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.speedSummary = options.speedSummary || new SpeedSummary(file); + + stream.pause(); // Immediately pause the stream in order to compatible with Node v0.8 + + var self = this; + this.createFile(share, directory, file, streamLength, options, function(error) { + if(error) { + callback(error); + } else { + var chunkStream = new ChunkStreamWithStream(stream, {calcContentMd5: options.storeFileContentMD5}); + self._createFileFromChunkStream(share, directory, file, chunkStream, streamLength, options, callback); + } + }); + + return options.speedSummary; +}; + +/** +* Provides a stream to write to a file. Assumes that the file exists. +* If it does not, please create the file using createFile before calling this method or use createWriteStreamNewFile. +* Please note the `Stream` returned by this API should be used with piping. +* +* @this {FileService} +* @param {string} share The share name. 
+* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects; +* @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. +* The default value is false for files. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The file's content settings. +* @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The file's content disposition. +* @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. +* @return {Writable} A Node.js Writable stream. +* @example +* var azure = require('azure-storage'); +* var FileService = azure.createFileService(); +* FileService.createFile(shareName, directoryName, fileName, 1024, function (err) { +* // Pipe file to a file +* var stream = fs.createReadStream(fileNameTarget).pipe(FileService.createWriteStreamToExistingFile(shareName, directoryName, fileName)); +* }); +*/ +FileService.prototype.createWriteStreamToExistingFile = function (share, directory, file, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createWriteStreamToExistingFile', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + }); + + var options = extend(true, {}, userOptions); + + var stream = new ChunkStream({calcContentMd5: options.storeFileContentMD5}); + this._createFileFromChunkStream(share, directory, file, stream, null, options, function (error, file, response) { + if(error) { + stream.emit('error', error); + } + + if (callback) { + callback(error, file, response); + } + }); + + return stream; +}; + 
+/** +* Provides a stream to write to a file. Creates the file before writing data. +* Please note the `Stream` returned by this API should be used with piping. +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {string} length The file length. +* @param {object} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects; +* @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. +* The default value is false for files. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {object} [options.contentSettings] The file's content settings. +* @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. +* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The file's content disposition. +* @param {string} [options.contentSettings.contentMD5] The file's MD5 hash. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. +* @return {Writable} A Node.js Writable stream. 
+* @example +* var azure = require('azure-storage'); +* var FileService = azure.createFileService(); +* var stream = fs.createReadStream(fileNameTarget).pipe(FileService.createWriteStreamToNewFile(shareName, directoryName, fileName)); +*/ +FileService.prototype.createWriteStreamToNewFile = function (share, directory, file, length, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createWriteStreamToNewFile', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.value(length, 'length'); + v.shareNameIsValid(share); + }); + + var options = extend(true, {}, userOptions); + + var stream = new ChunkStream({calcContentMd5: options.storeFileContentMD5}); + stream.pause(); + + var self = this; + this.createFile(share, directory, file, length, options, function(error) { + if(error) { + stream.emit('error', error); + callback(error); + } + else { + stream.resume(); + self._createFileFromChunkStream(share, directory, file, stream, null, options, function (error, file, response) { + if(error) { + stream.emit('error', error); + } + + if (callback) { + callback(error, file, response); + } + }); + } + }); + + return stream; +}; + +/** +* Starts to copy a file to a destination within the storage account. +* +* @this {FileService} +* @param {string} sourceUri The source file or blob URI. +* @param {string} targetShare The target share name. +* @param {string} targetDirectory The target directory name. +* @param {string} targetFile The target file name. +* @param {object} [options] The request options. +* @param {object} [options.metadata] The target file metadata key/value pairs. +* @param {AccessConditions} [options.accessConditions] The access conditions. +* @param {AccessConditions} [options.sourceAccessConditions] The source access conditions. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.startCopyFile = function (sourceUri, targetShare, targetDirectory, targetFile, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('startCopyFile', function (v) { + v.string(targetShare, 'targetShare'); + v.stringAllowEmpty(targetDirectory, 'targetDirectory'); + v.string(targetFile, 'targetFile'); + v.shareNameIsValid(targetShare); + v.callback(callback); + }); + + var targetResourceName = createResourceName(targetShare, targetDirectory, targetFile); + + var options = extend(true, {}, userOptions); + + var webResource = WebResource.put(targetResourceName) + .withHeader(HeaderConstants.COPY_SOURCE, sourceUri) + .addOptionalMetadataHeaders(options.metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(targetShare, targetDirectory, targetFile); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers, true); + + if (options.metadata) { + responseObject.fileResult.metadata = options.metadata; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Abort a file copy operation. +* +* @this {FileService} +* @param {string} share The destination share name. +* @param {string} directory The destination directory name. +* @param {string} file The destination file name. +* @param {string} copyId The copy operation identifier. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. 
+* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype.abortCopyFile = function (share, directory, file, copyId, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('abortCopyFile', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var resourceName = createResourceName(share, directory, file); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COPY_ID, copyId) + .withQueryOption(QueryStringConstants.COMP, 'copy') + .withHeader(HeaderConstants.COPY_ACTION, 'abort'); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +// Internal Methods + +/** +* Updates a file from text. +* @ignore +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {string} text The text string. +* @param {Readable} readStream The Node.js Readable stream. +* @param {int} rangeStart The range start. +* @param {int} rangeEnd The range end. +* @param {object} [options] The request options. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {bool} [options.transactionalContentMD5] An MD5 hash of the content. This hash is used to verify the integrity of the data during transport. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, file, response)} callback `error` will contain information +* if an error occurs; otherwise `file` will contain +* the file information. +* `response` will contain information related to this operation. +*/ +FileService.prototype._createRanges = function (share, directory, file, text, readStream, rangeStart, rangeEnd, options, callback) { + var request = this._updateFilesImpl(share, directory, file, rangeStart, rangeEnd, FileConstants.RangeWriteOptions.UPDATE, options); + + // At this point, we have already validated that the range is less than 4MB. Therefore, we just need to calculate the contentMD5 if required. 
+ if(!azureutil.objectIsNull(text) && azureutil.objectIsNull(options.transactionalContentMD5) && options.useTransactionalMD5 === true) { + request.withHeader(HeaderConstants.CONTENT_MD5, azureutil.getContentMd5(text)); + } + + var processResponseCallback = function (responseObject, next) { + responseObject.fileResult = null; + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + if(!azureutil.objectIsNull(text)) { + this.performRequest(request, text, options, processResponseCallback); + } else { + this.performRequestOutputStream(request, readStream, options, processResponseCallback); + } +}; + +/** +* Uploads a file from a stream. +* @ignore +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param (Stream) stream Stream to the data to store. +* @param {int} streamLength The length of the stream to upload. +* @param {object|function} [options] The request options. +* @param {SpeedSummary} [options.speedSummary] The download tracker objects; +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions. +* @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads. +* @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream. 
+* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file. +* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource. +* @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it. +* @param {string} [options.contentSettings.contentDisposition] The file's content disposition. +* @param {string} [options.contentSettings.contentMD5] The MD5 hash of the file content. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {function(error, null)} callback The callback function. 
+* @return {SpeedSummary} +*/ +FileService.prototype._createFileFromChunkStream = function(share, directory, file, chunkStream, streamLength, options, callback) { + this.logger.debug(util.format('_createFileFromChunkStream for file %s', file)); + + var apiName = '_createRanges'; + var sizeLimitation = FileConstants.DEFAULT_WRITE_SIZE_IN_BYTES; + var originalContentMD5 = azureutil.tryGetValueChain(options, ['contentSettings', 'contentMD5'], null); + + this._setOperationExpiryTime(options); + + // initialize the speed summary + var speedSummary = options.speedSummary || new SpeedSummary(); + speedSummary.totalSize = streamLength; + + var parallelOperationThreadCount = options.parallelOperationThreadCount || this.parallelOperationThreadCount; + + // initialize chunk allocator + var allocator = new ChunkAllocator(sizeLimitation, parallelOperationThreadCount, { logger: this.logger }); + + // if this is a FileReadStream, set the allocator on that stream + if (chunkStream._stream && chunkStream._stream.setMemoryAllocator) { + chunkStream._stream.setMemoryAllocator(allocator); + } + + // initialize batch operations + var batchOperations = new BatchOperation(apiName, { logger : this.logger, enableReuseSocket : this.defaultEnableReuseSocket}); + batchOperations.setConcurrency(parallelOperationThreadCount); + + // initialize options + var rangeOptions = { + timeoutIntervalInMs: options.timeoutIntervalInMs, + clientRequestTimeoutInMs: options.clientRequestTimeoutInMs, + operationExpiryTime: options.operationExpiryTime + }; + + var self = this; + chunkStream.on('data', function (data, range) { + var operation = null; + var full = false; + var autoIncrement = speedSummary.getAutoIncrementFunction(data.length); + + if(data.length > sizeLimitation) { + throw new Error(util.format(SR.EXCEEDED_SIZE_LIMITATION, sizeLimitation, data.length)); + } + + if (options.useTransactionalMD5) { + //calculate content md5 for the current uploading block data + var contentMD5 = 
azureutil.getContentMd5(data); + rangeOptions.transactionalContentMD5 = contentMD5; + } + + if (azureutil.isBufferAllZero(data)) { + self.logger.debug(util.format('Skip upload data from %s bytes to %s bytes to file %s', range.start, range.end, file)); + speedSummary.increment(data.length); + } else { + operation = new BatchOperation.RestOperation(self, apiName, share, directory, file, data, null, range.start, range.end, rangeOptions, function (error) { + if(!error) { + autoIncrement(); + } else { + self.logger.debug(util.format('Stop downloading data as error happens. Error: %s', util.inspect(error))); + chunkStream.stop(); + } + + allocator.releaseBuffer(data); + data = null; + }); + } + + if (operation) { + full = batchOperations.addOperation(operation); + operation = null; + + if(full) { + self.logger.debug('file stream paused'); + chunkStream.pause(); + } + } + }); + + chunkStream.on('end', function () { + self.logger.debug(util.format('File read stream ended for file %s', file)); + batchOperations.enableComplete(); + }); + + batchOperations.on('drain', function () { + self.logger.debug('File stream resume'); + chunkStream.resume(); + }); + + batchOperations.on('end', function (error) { + self.logger.debug('batch operations commited'); + + if (error) { + callback(error); + return; + } + + if (originalContentMD5) { + options.contentSettings.contentMD5 = originalContentMD5; + } else if (options.storeFileContentMD5) { + azureutil.setObjectInnerPropertyValue(options, ['contentSettings', 'contentMD5'], chunkStream.getContentMd5('base64')); + } + + // upload file completely + var fileProperties = extend(false, options.contentSettings, { contentLength: options.streamLength }); + self.setFileProperties(share, directory, file, fileProperties, function (error, file, response) { + chunkStream.finish(); + callback(error, file, response); + }); + }); + + return speedSummary; +}; + +/** +* Downloads a file into a stream. 
+* @ignore +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {Writable} writeStream The Node.js Writable stream. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {string} [options.rangeStart] Return only the bytes of the file in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the file in the specified range. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. 
+* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the file information. +* `response` will contain information related to this operation. +* +* @return {SpeedSummary} +*/ +FileService.prototype._getFileToStream = function (share, directory, file, writeStream, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('_getFileToStream', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.object(writeStream, 'writeStream'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.get(resourceName) + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId) + .withRawResponse(); + + FileResult.setHeaders(webResource, options); + this._setRangeContentMD5Header(webResource, options); + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.fileResult = null; + + if (!responseObject.error) { + responseObject.fileResult = new FileResult(share, directory, file); + responseObject.fileResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers, true); + + self._validateLengthAndMD5(options, responseObject); + + if (options.speedSummary) { + options.speedSummary.increment(responseObject.length); + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequestInputStream(webResource, null, writeStream, options, 
processResponseCallback); +}; + +/** +* Downloads a file into a range stream. +* @ignore +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {Writable} writeStream The Node.js Writable stream. +* @param {object} [options] The request options. +* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when uploading. +* @param {string} [options.rangeStart] Return only the bytes of the file in the specified range. +* @param {string} [options.rangeEnd] Return only the bytes of the file in the specified range. +* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions. +* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the file information. +* `response` will contain information related to this operation. +* +* @return {SpeedSummary} +*/ +FileService.prototype._getFileToRangeStream = function (share, directory, file, writeStream, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('_getFileToRangeStream', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.object(writeStream, 'writeStream'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var speedSummary = userOptions.speedSummary || new SpeedSummary(file); + var parallelOperationThreadCount = userOptions.parallelOperationThreadCount || this.parallelOperationThreadCount; + var batchOperations = new BatchOperation('getfile', { callbackInOrder: true, logger : this.logger, enableReuseSocket : this.defaultEnableReuseSocket }); + batchOperations.setConcurrency(parallelOperationThreadCount); + + var rangeStream = new FileRangeStream(this, share, directory, file, userOptions); + + var self = this; + var checkMD5sum = !userOptions.disableContentMD5Validation; + var md5Hash = null; + if (checkMD5sum) { + md5Hash = new Md5Wrapper().createMd5Hash(); + } + + var savedFileResult = null; + var savedFileResponse = null; + + rangeStream.on('range', function (range) { + if (!speedSummary.totalSize) { + speedSummary.totalSize = rangeStream.rangeSize; + } + + var requestOptions = { + rangeStart : range.start, + rangeEnd : range.end, + responseEncoding : null //Use Buffer to store the response data + }; + + var rangeSize = range.size; + 
requestOptions.shareSnapshotId = userOptions.shareSnapshotId; + requestOptions.timeoutIntervalInMs = userOptions.timeoutIntervalInMs; + requestOptions.clientRequestTimeoutInMs = userOptions.clientRequestTimeoutInMs; + requestOptions.useTransactionalMD5 = userOptions.useTransactionalMD5; + + if (range.dataSize === 0) { + var autoIncrement = speedSummary.getAutoIncrementFunction(rangeSize); + //No operation to do and only wait for write zero to file in callback + var writeZeroOperation = new BatchOperation.CommonOperation(BatchOperation.noOperation, function (error) { + if (error) return; + var bufferAvailable = azureutil.writeZerosToStream(writeStream, rangeSize, md5Hash, autoIncrement); + //There is no need to pause the rangestream since we can perform http request and write disk at the same time + self.logger.debug(util.format('Write %s bytes Zero from %s to %s', rangeSize, range.start, range.end)); + if (!bufferAvailable) { + self.logger.debug('Write stream is full and pause batch operation'); + batchOperations.pause(); + } + }); + batchOperations.addOperation(writeZeroOperation); + return; + } + + if (range.start > range.end) { + return; + } + + var operation = new BatchOperation.RestOperation(self, 'getFileToText', share, directory, file, requestOptions, function (error, content, fileResult, response) { + if (!error) { + if (rangeSize !== content.length) { + self.logger.warn(util.format('Request %s bytes, but server returns %s bytes', rangeSize, content.length)); + } + //Save one of the succeeded callback parameters and use them at the final callback + if (!savedFileResult) { + savedFileResult = fileResult; + } + if (!savedFileResponse) { + savedFileResponse = response; + } + var autoIncrement = speedSummary.getAutoIncrementFunction(content.length); + var bufferAvailable = writeStream.write(content, autoIncrement); + if (!bufferAvailable) { + self.logger.debug('Write stream is full and pause batch operation'); + batchOperations.pause(); + } + if (md5Hash) { + 
md5Hash.update(content); + } + content = null; + } else { + self.logger.debug(util.format('Stop downloading data as error happens. Error: %s', util.inspect(error))); + rangeStream.stop(); + } + }); + + var full = batchOperations.addOperation(operation); + if (full) { + self.logger.debug('Pause range stream'); + rangeStream.pause(); + } + }); + + rangeStream.on('end', function () { + self.logger.debug('Range stream has ended.'); + batchOperations.enableComplete(); + }); + + batchOperations.on('drain', function () { + self.logger.debug('Resume range stream'); + rangeStream.resume(); + }); + + writeStream.on('drain', function () { + self.logger.debug('Resume batch operations'); + batchOperations.resume(); + }); + + batchOperations.on('end', function (error) { + self.logger.debug('Download completed!'); + if (error) { + callback(error); + } else { + writeStream.end(function () { + self.logger.debug('Write stream has ended'); + if (!savedFileResult) { + savedFileResult = {}; + } + azureutil.setObjectInnerPropertyValue(savedFileResult, ['contentSettings', 'contentMD5'], azureutil.tryGetValueChain(userOptions, ['contentSettings', 'contentMD5'], null)); + savedFileResult.clientSideContentMD5 = null; + if (md5Hash) { + savedFileResult.clientSideContentMD5 = md5Hash.digest('base64'); + } + callback(error, savedFileResult, savedFileResponse); + }); + } + }); + + var listOptions = { + timeoutIntervalInMs : userOptions.timeoutIntervalInMs, + clientRequestTimeoutInMs : userOptions.clientRequestTimeoutInMs, + }; + + rangeStream.list(listOptions); + return speedSummary; +}; + +/** +* @ignore +*/ +FileService.prototype._setRangeContentMD5Header = function (webResource, options) { + if(!azureutil.objectIsNull(options.rangeStart) && options.useTransactionalMD5) { + if(azureutil.objectIsNull(options.rangeEnd)) { + throw new ArgumentNullError(util.format(SR.ARGUMENT_NULL_OR_EMPTY, options.rangeEndHeader)); + } + + var size = parseInt(options.rangeEnd, 10) - parseInt(options.rangeStart, 
10) + 1; + if (size > FileConstants.MAX_RANGE_GET_SIZE_WITH_MD5) { + throw new Error(SR.INVALID_RANGE_FOR_MD5); + } else { + webResource.withHeader(HeaderConstants.RANGE_GET_CONTENT_MD5, 'true'); + } + } +}; + +/** +* @ignore +*/ +FileService.prototype._updateFilesImpl = function (share, directory, file, rangeStart, rangeEnd, writeMethod, options) { + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.put(resourceName) + .withQueryOption(QueryStringConstants.COMP, 'range') + .withHeader(HeaderConstants.CONTENT_TYPE, 'application/octet-stream') + .withHeader(HeaderConstants.FILE_WRITE, writeMethod); + + options.rangeStart = rangeStart; + options.rangeEnd = rangeEnd; + + FileResult.setHeaders(webResource, options); + + if(writeMethod === FileConstants.RangeWriteOptions.UPDATE) { + var size = (rangeEnd - rangeStart) + 1; + webResource.withHeader(HeaderConstants.CONTENT_LENGTH, size); + } else { + webResource.withHeader(HeaderConstants.CONTENT_LENGTH, 0); + } + + return webResource; +}; + +/** +* @ignore +*/ +FileService.prototype._validateLengthAndMD5 = function (options, responseObject) { + var storedMD5 = responseObject.response.headers[Constants.HeaderConstants.CONTENT_MD5]; + var contentLength; + + if (!azureutil.objectIsNull(responseObject.response.headers[Constants.HeaderConstants.CONTENT_LENGTH])) { + contentLength = parseInt(responseObject.response.headers[Constants.HeaderConstants.CONTENT_LENGTH], 10); + } + + // If the user has not specified this option, the default value should be false. + if(azureutil.objectIsNull(options.disableContentMD5Validation)) { + options.disableContentMD5Validation = false; + } + + // None of the below cases should be retried. So set the error in every case so the retry policy filter handle knows that it shouldn't be retried. 
+ if (options.disableContentMD5Validation === false && options.useTransactionalMD5 === true && azureutil.objectIsNull(storedMD5)) { + responseObject.error = new Error(SR.MD5_NOT_PRESENT_ERROR); + responseObject.retryable = false; + } + + // Validate length and if required, MD5. + // If getFileToText called this method, then the responseObject.length and responseObject.contentMD5 are not set. Calculate them first using responseObject.response.body and then validate. + if(azureutil.objectIsNull(responseObject.length)) { + if (typeof responseObject.response.body == 'string') { + responseObject.length = Buffer.byteLength(responseObject.response.body); + } else if (Buffer.isBuffer(responseObject.response.body)) { + responseObject.length = responseObject.response.body.length; + } + } + + if(!azureutil.objectIsNull(contentLength) && responseObject.length !== contentLength) { + responseObject.error = new Error(SR.CONTENT_LENGTH_MISMATCH); + responseObject.retryable = false; + } + + if(options.disableContentMD5Validation === false && azureutil.objectIsNull(responseObject.contentMD5)) { + responseObject.contentMD5 = azureutil.getContentMd5(responseObject.response.body); + } + + if (options.disableContentMD5Validation === false && !azureutil.objectIsNull(storedMD5) && storedMD5 !== responseObject.contentMD5) { + responseObject.error = new Error(util.format(SR.HASH_MISMATCH, storedMD5, responseObject.contentMD5)); + responseObject.retryable = false; + } +}; + +/** +* Checks whether or not a file exists on the service. +* @ignore +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {string} primaryOnly If true, the request will be executed against the primary storage location. +* @param {object} [options] The request options. 
+* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, result, response)} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the file information including the `exists` boolean member. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype._doesFileExist = function (share, directory, file, primaryOnly, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('FileExists', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var resourceName = createResourceName(share, directory, file); + var webResource = WebResource.head(resourceName) + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + /*if(primaryOnly === false) { + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + }*/ + + var processResponseCallback = function (responseObject, next) { + responseObject.fileResult = new FileResult(share, directory, file); + if (!responseObject.error) { + responseObject.fileResult.exists = true; + responseObject.fileResult.getPropertiesFromHeaders(responseObject.response.headers); + + } else if (responseObject.error && responseObject.error.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound) { + responseObject.error = null; + responseObject.fileResult.exists = false; + responseObject.response.isSuccessful = true; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.fileResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Checks whether or not a directory exists on the service. +* @ignore +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} primaryOnly If true, the request will be executed against the primary storage location. 
+* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The share snapshot identifier. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, result, response)} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the directory information including `exists` boolean member. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype._doesDirectoryExist = function (share, directory, primaryOnly, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('directoryExists', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.head(createResourceName(share, directory)) + .withQueryOption(QueryStringConstants.RESTYPE, 'directory') + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + /*if(primaryOnly === false) { + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + }*/ + + var self = this; + var processResponseCallback = function(responseObject, next){ + responseObject.directoryResult = new DirectoryResult(directory); + responseObject.directoryResult.exists = false; + + if (!responseObject.error) { + responseObject.directoryResult.exists = true; + responseObject.directoryResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.directoryResult.getPropertiesFromHeaders(responseObject.response.headers); + + } else if (responseObject.error && responseObject.error.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound) { + responseObject.error = null; + responseObject.response.isSuccessful = true; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.directoryResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Checks whether or not a share exists on the service. +* @ignore +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} [options.shareSnapshotId] The share snapshot identifier. 
+* @param {string} primaryOnly If true, the request will be executed against the primary storage location. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, result, response)} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* the share information including `exists` boolean member. +* `response` will contain information related to this operation. 
+*/ +FileService.prototype._doesShareExist = function (share, primaryOnly, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('shareExists', function (v) { + v.string(share, 'share'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.head(share) + .withQueryOption(QueryStringConstants.RESTYPE, 'share') + .withQueryOption(QueryStringConstants.SHARE_SNAPSHOT, options.shareSnapshotId); + + /*if(primaryOnly === false) { + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + }*/ + + var processResponseCallback = function(responseObject, next){ + responseObject.shareResult = new ShareResult(share); + responseObject.shareResult.exists = false; + + if (!responseObject.error) { + responseObject.shareResult.exists = true; + responseObject.shareResult.getPropertiesFromHeaders(responseObject.response.headers); + + } else if (responseObject.error && responseObject.error.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound) { + responseObject.error = null; + responseObject.response.isSuccessful = true; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.shareResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* The callback for {FileService~getFileToText}. +* @typedef {function} FileService~FileToText +* @param {object} error If an error occurs, the error information. +* @param {string} text The text returned from the file. +* @param {object} file Information about the file. +* @param {object} response Information related to this operation. 
+*/ + +FileService.SpeedSummary = SpeedSummary; + +module.exports = FileService; + + +/***/ }), + +/***/ 50007: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var azureCommon = __nccwpck_require__(18418); +var extend = __nccwpck_require__(99237); +var fs = __nccwpck_require__(57147); +var FileService = __nccwpck_require__(99087); + +var azureutil = azureCommon.util; +var FileReadStream = azureCommon.FileReadStream; +var SpeedSummary = azureCommon.SpeedSummary; +var validate = azureCommon.validate; + +/** +* Downloads an Azure file into a file. +* (Not available in the JavaScript Client Library for Browsers) +* +* @this {FileService} +* @param {string} share The share name. +* @param {string} directory The directory name. Use '' to refer to the base directory. +* @param {string} file The file name. File names may not start or end with the delimiter '/'. +* @param {string} localFileName The local path to the file to be downloaded. +* @param {object} [options] The request options. +* @param {string} [options.shareSnapshotId] The snapshot identifier of the share. +* @param {boolean} [options.skipSizeCheck] Skip the size check to perform direct download. +* Set the option to true for small files. +* Parallel download and speed summary won't work with this option on. 
+* @param {SpeedSummary} [options.speedSummary] The download tracker objects.
+* @param {int} [options.parallelOperationThreadCount] The number of parallel operations that may be performed when downloading.
+* @param {string} [options.rangeStart] Return only the bytes of the file in the specified range.
+* @param {string} [options.rangeEnd] Return only the bytes of the file in the specified range.
+* @param {boolean} [options.useTransactionalMD5] When set to true, Calculate and send/validate content MD5 for transactions.
+* @param {boolean} [options.disableContentMD5Validation] When set to true, MD5 validation will be disabled when downloading files.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information if an error occurs;
+* otherwise `[result]{@link FileResult}` will contain the file information.
+* `response` will contain information related to this operation. 
+* @return {SpeedSummary}
+*
+* @example
+* var azure = require('azure-storage');
+* var FileService = azure.createFileService();
+* FileService.getFileToLocalFile('taskshare', 'taskdirectory', 'task1', 'task1-download.txt', function(error, serverFile) {
+* if(!error) {
+* // file available in serverFile.file variable
+* }
+*/
+FileService.prototype.getFileToLocalFile = function (share, directory, file, localFileName, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+  userOptions.speedSummary = userOptions.speedSummary || new SpeedSummary(file);
+
+  validate.validateArgs('getFileToLocalFile', function (v) {
+    v.string(share, 'share');
+    v.stringAllowEmpty(directory, 'directory');
+    v.string(file, 'file');
+    v.string(localFileName, 'localFileName');
+    v.shareNameIsValid(share);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+
+  var writeStream = fs.createWriteStream(localFileName);
+  writeStream.on('error', function (error) {
+    callback(error);
+  });
+
+  this.getFileToStream(share, directory, file, writeStream, options, function (error, responseFile, response) {
+    if (error) {
+      writeStream.end(function () {
+        // If the download failed from the beginning, remove the file.
+        if (fs.existsSync(localFileName) && writeStream.bytesWritten === 0) {
+          fs.unlinkSync(localFileName);
+        }
+        callback(error, responseFile, response);
+      });
+    } else {
+      callback(error, responseFile, response);
+    }
+  });
+
+  return options.speedSummary;
+};
+
+
+/**
+* Uploads a file to storage from a local file. If the file already exists on the service, it will be overwritten.
+* (Not available in the JavaScript Client Library for Browsers)
+*
+* @this {FileService}
+* @param {string} share The share name.
+* @param {string} directory The directory name. Use '' to refer to the base directory.
+* @param {string} file The file name. 
File names may not start or end with the delimiter '/'.
+* @param {string} localFileName The local path to the file to be uploaded.
+* @param {object} [options] The request options.
+* @param {SpeedSummary} [options.speedSummary] The upload tracker object.
+* @param {bool} [options.storeFileContentMD5] Specifies whether the file's ContentMD5 header should be set on uploads.
+* The default value is false for files.
+* @param {bool} [options.useTransactionalMD5] Calculate and send/validate content MD5 for transactions.
+* @param {object} [options.contentSettings] The file's content settings.
+* @param {string} [options.contentSettings.contentType] The MIME content type of the file. The default type is application/octet-stream.
+* @param {string} [options.contentSettings.contentEncoding] The content encodings that have been applied to the file.
+* @param {string} [options.contentSettings.contentLanguage] The natural languages used by this resource.
+* @param {string} [options.contentSettings.cacheControl] The file service stores this value but does not use or modify it.
+* @param {string} [options.contentSettings.contentDisposition] The file's content disposition.
+* @param {string} [options.contentSettings.contentMD5] The file's MD5 hash.
+* @param {object} [options.metadata] The metadata key/value pairs.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link FileResult}` will contain the file information. +* `response` will contain information related to this operation. +* @return {SpeedSummary} +*/ +FileService.prototype.createFileFromLocalFile = function (share, directory, file, localFileName, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createFileFromLocalFile', function (v) { + v.string(share, 'share'); + v.stringAllowEmpty(directory, 'directory'); + v.string(file, 'file'); + v.string(localFileName, 'localFileName'); + v.shareNameIsValid(share); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.speedSummary = options.speedSummary || new SpeedSummary(file); + + var self = this; + fs.stat(localFileName, function(error, stat) { + if (error) { + callback(error); + } else { + self.createFile(share, directory, file, stat.size, options, function(error) { + if(error) { + callback(error); + } else { + var stream = new FileReadStream(localFileName, {calcContentMd5: options.storeFileContentMD5}); + self._createFileFromChunkStream(share, directory, file, stream, stat.size, options, callback); + } + }); + } + }); + + return options.speedSummary; +}; + +module.exports = FileService; + +/***/ }), + +/***/ 83656: +/***/ ((module, exports) 
=> { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Expose 'FileUtilities'. +exports = module.exports; + +/** +* Defines constants, enums, and utility functions for use with the File service. +* @namespace FileUtilities +*/ +var FileUtilities = { + /** + * Permission types + * + * @const + * @enum {string} + */ + SharedAccessPermissions: { + READ: 'r', + CREATE: 'c', + WRITE: 'w', + DELETE: 'd', + LIST: 'l' + }, + + /** + * Listing details. + * + * @const + * @enum {string} + */ + ListingDetails: { + METADATA: 'metadata' + }, + + /** + * File and share public access types. + * + * @const + * @enum {string} + */ + SharePublicAccessType: { + OFF: null, + SHARE: 'share', + FILE: 'file' + }, + + /** + * Deletion options for share snapshots + * + * @const + * @enum {string} + */ + ShareSnapshotDeleteOptions: { + SHARE_AND_SNAPSHOTS: 'include' + }, +}; + +module.exports = FileUtilities; + +/***/ }), + +/***/ 56575: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var util = __nccwpck_require__(73837); +var RangeStream = __nccwpck_require__(98664); +var Constants = __nccwpck_require__(60658); + +/** +* File range stream +*/ +function FileRangeStream(fileServiceClient, share, directory, file, options) { + FileRangeStream['super_'].call(this, fileServiceClient, null, null, options); + + this._lengthHeader = Constants.HeaderConstants.FILE_CONTENT_LENGTH; + if (options.minRangeSize) { + this._minRangeSize = options.minRangeSize; + } else { + this._minRangeSize = Constants.FileConstants.MIN_WRITE_FILE_SIZE_IN_BYTES; + } + if (options.maxRangeSize) { + this._maxRangeSize = options.maxRangeSize; + } else { + this._maxRangeSize = Constants.FileConstants.DEFAULT_WRITE_SIZE_IN_BYTES; + } + this._listFunc = fileServiceClient.listRanges; + this._resourcePath.push(share); + this._resourcePath.push(directory); + this._resourcePath.push(file); +} + +util.inherits(FileRangeStream, RangeStream); + +module.exports = FileRangeStream; + + +/***/ }), + +/***/ 75667: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Module dependencies.
+var HeaderConstants = (__nccwpck_require__(82187).Constants.HeaderConstants);
+
+/**
+* Creates a new DirectoryResult object.
+* @class
+* The DirectoryResult class is used to store the directory information.
+*
+ * @property {string} name The directory name.
+ * @property {object} metadata The metadata key/value pair.
+ * @property {string} etag The etag.
+ * @property {string} lastModified The date/time that the directory was last modified.
+ * @property {string} requestId The request id.
+ * @property {string} serverEncrypted If the directory metadata is completely encrypted using the specified algorithm. true/false.
+ *
+* @constructor
+* @param {string} [name] The directory name. 
+*/ +function DirectoryResult(name) { + this.name = name; +} + +DirectoryResult.parse = function (dirXml) { + return new DirectoryResult(dirXml.Name); +}; + +DirectoryResult.prototype.getPropertiesFromHeaders = function (headers) { + var self = this; + + var setDirectoryPropertyFromHeaders = function (directoryProperty, headerProperty) { + if (!self[directoryProperty] && headers[headerProperty.toLowerCase()]) { + self[directoryProperty] = headers[headerProperty.toLowerCase()]; + } + }; + + setDirectoryPropertyFromHeaders('etag', HeaderConstants.ETAG); + setDirectoryPropertyFromHeaders('lastModified', HeaderConstants.LAST_MODIFIED); + setDirectoryPropertyFromHeaders('requestId', HeaderConstants.REQUEST_ID); + setDirectoryPropertyFromHeaders('serverEncrypted', HeaderConstants.SERVER_ENCRYPTED); +}; + +module.exports = DirectoryResult; + +/***/ }), + +/***/ 7253: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var _ = __nccwpck_require__(7404); + +var azureCommon = __nccwpck_require__(82187); +var azureutil = azureCommon.util; +var Constants = azureCommon.Constants; +var HeaderConstants = Constants.HeaderConstants; + +/** +* Creates a new FileResult object. +* @class +* The FileResult class is used to store the file information. +* + * @property {string} share The share name. 
+ * @property {string} directory The directory name. + * @property {string} name The file name. + * @property {object} metadata The metadata key/value pair. + * @property {string} etag The etag. + * @property {string} lastModified The date/time that the file was last modified. + * @property {string} requestId The request id. + * @property {string} acceptRanges The accept ranges. + * @property {string} serverEncrypted If the file data and application metadata are completely encrypted using the specified algorithm. true/false. + * @property {string} contentRange The content range + * @property {string} contentLength The size of the file in bytes. + * @property {object} contentSettings The content settings. + * @property {string} contentSettings.contentType The content type. + * @property {string} contentSettings.contentEncoding The content encoding. + * @property {string} contentSettings.contentLanguage The content language. + * @property {string} contentSettings.cacheControl The cache control. + * @property {string} contentSettings.contentDisposition The content disposition. + * @property {string} contentSettings.contentMD5 The content MD5 hash. + * @property {object} copy The copy information. + * @property {string} copy.id The copy id. + * @property {string} copy.status The copy status. + * @property {string} copy.completionTime The copy completion time. + * @property {string} copy.statusDescription The copy status description. + * @property {string} copy.progress The copy progress. + * @property {string} copy.source The copy source. + * +* @constructor +* @param {string} [share] The share name. +* @param {string} [directory] The directory name. +* @param {string} [name] The file name. 
+*/ +function FileResult(share, directory, name) { + this.share = share; + this.directory = directory; + this.name = name; +} + +FileResult.parse = function (entryXml) { + var listResult = new FileResult(); + for (var propertyName in entryXml) { + if (propertyName === 'Properties') { + // Lift out the properties onto the main object to keep consistent across all APIs like: getFileProperties + azureutil.setPropertyValueFromXML(listResult, entryXml[propertyName], true); + } else { + listResult[propertyName.toLowerCase()] = entryXml[propertyName]; + } + } + + return listResult; +}; + +var responseHeaders = { + 'acceptRanges': 'ACCEPT_RANGES', + 'contentLength': 'CONTENT_LENGTH', + 'contentRange': 'CONTENT_RANGE', + + 'contentSettings.contentType': 'CONTENT_TYPE', + 'contentSettings.contentEncoding': 'CONTENT_ENCODING', + 'contentSettings.contentLanguage': 'CONTENT_LANGUAGE', + 'contentSettings.cacheControl': 'CACHE_CONTROL', + 'contentSettings.contentDisposition': 'CONTENT_DISPOSITION', + 'contentSettings.contentMD5': 'CONTENT_MD5', + 'contentSettings.fileContentMD5': 'FILE_CONTENT_MD5', + + 'copy.id': 'COPY_ID', + 'copy.status': 'COPY_STATUS', + 'copy.source': 'COPY_SOURCE', + 'copy.progress': 'COPY_PROGRESS', + 'copy.completionTime': 'COPY_COMPLETION_TIME', + 'copy.statusDescription': 'COPY_STATUS_DESCRIPTION' +}; + +FileResult.prototype.getPropertiesFromHeaders = function (headers, content) { + var self = this; + + var setFilePropertyFromHeaders = function (fileProperty, headerProperty) { + if (!azureutil.tryGetValueChain(self, fileProperty.split('.'), null) && headers[headerProperty.toLowerCase()]) { + azureutil.setObjectInnerPropertyValue(self, fileProperty.split('.'), headers[headerProperty.toLowerCase()]); + + if (fileProperty === 'copy.progress') { + var info = azureutil.parseCopyProgress(self.copy.progress); + self.copy.bytesCopied = parseInt(info.bytesCopied); + self.copy.totalBytes = parseInt(info.totalBytes); + } + } + }; + + // For range get, 
'x-ms-content-md5' indicate the overall MD5 of the file. Try to set the contentMD5 using this header if it presents + setFilePropertyFromHeaders('contentSettings.contentMD5', HeaderConstants.FILE_CONTENT_MD5); + + setFilePropertyFromHeaders('etag', HeaderConstants.ETAG); + setFilePropertyFromHeaders('lastModified', HeaderConstants.LAST_MODIFIED); + setFilePropertyFromHeaders('requestId', HeaderConstants.REQUEST_ID); + setFilePropertyFromHeaders('serverEncrypted', HeaderConstants.SERVER_ENCRYPTED); + + if (content) { + _.chain(responseHeaders).pairs().each(function (pair) { + var property = pair[0]; + var header = HeaderConstants[pair[1]]; + setFilePropertyFromHeaders(property, header); + }); + + } +}; + +/** +* This method sets the HTTP headers and is used by all methods except setFileProperties and createFile. +* Those methods will set the x-ms-* headers using setProperties. +*/ +FileResult.setHeaders = function (webResource, options) { + var setHeaderProperty = function (headerProperty, fileProperty) { + var propertyValue = azureutil.tryGetValueChain(options, fileProperty.split('.'), null); + if (propertyValue) { + webResource.withHeader(headerProperty, propertyValue); + } + }; + + if (options) { + // Content-MD5 + setHeaderProperty(HeaderConstants.CONTENT_MD5, 'transactionalContentMD5'); + + // Content-Length + setHeaderProperty(HeaderConstants.CONTENT_LENGTH, 'contentLength'); + + // Range + if (!azureutil.objectIsNull(options.rangeStart)) { + var range = 'bytes=' + options.rangeStart + '-'; + + if (!azureutil.objectIsNull(options.rangeEnd)) { + range += options.rangeEnd; + } + + webResource.withHeader(HeaderConstants.STORAGE_RANGE, range); + } + } +}; + +/** +* This method sets the x-ms-* headers and is used by setFileProperties and createFile. +* All other methods will set the regular HTTP headers using setHeaders. 
+*/ +FileResult.setProperties = function (webResource, options) { + var setHeaderProperty = function (headerProperty, fileProperty) { + var propertyValue = azureutil.tryGetValueChain(options, fileProperty.split('.'), null); + if (propertyValue) { + webResource.withHeader(headerProperty, propertyValue); + } + }; + + if (options) { + // Content-Length + setHeaderProperty(HeaderConstants.FILE_CONTENT_LENGTH, 'contentLength'); + + // Content-Type + setHeaderProperty(HeaderConstants.FILE_CONTENT_TYPE, 'contentSettings.contentType'); + + // Content-Encoding + setHeaderProperty(HeaderConstants.FILE_CONTENT_ENCODING, 'contentSettings.contentEncoding'); + + // Content-Language + setHeaderProperty(HeaderConstants.FILE_CONTENT_LANGUAGE, 'contentSettings.contentLanguage'); + + // Content-Disposition + setHeaderProperty(HeaderConstants.FILE_CONTENT_DISPOSITION, 'contentSettings.contentDisposition'); + + // Cache-Control + setHeaderProperty(HeaderConstants.FILE_CACHE_CONTROL, 'contentSettings.cacheControl'); + + // Content-MD5 + setHeaderProperty(HeaderConstants.FILE_CONTENT_MD5, 'contentSettings.contentMD5'); + + if (options.metadata) { + webResource.addOptionalMetadataHeaders(options.metadata); + } + } +}; + +module.exports = FileResult; + +/***/ }), + +/***/ 12636: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// Module dependencies. +var azureCommon = __nccwpck_require__(82187); +var azureutil = azureCommon.util; +var Constants = azureCommon.Constants; +var HeaderConstants = Constants.HeaderConstants; + +/** +* Creates a new ShareResult object. +* @class +* The ShareResult class is used to store the share information. +* + * @property {string} name The share name. + * @property {object} metadata The metadata key/value pair. + * @property {string} etag The etag. + * @property {string} lastModified The date/time that the share was last modified. + * @property {string} requestId The request id. + * @property {string} quota The share quota. + * +* @constructor +* @param {string} [name] The share name. +*/ +function ShareResult(name) { + this.name = name; +} + +ShareResult.parse = function (shareXml, name) { + var shareResult = new ShareResult(name); + for (var propertyName in shareXml) { + if (shareXml.hasOwnProperty(propertyName)) { + if (propertyName === 'Properties') { + // Lift out the properties onto the main object to keep consistent across all APIs like: getShareProperties + azureutil.setPropertyValueFromXML(shareResult, shareXml[propertyName], true); + } else if (propertyName === 'Metadata' || propertyName === 'ShareStats') { + var resultPropertyName = azureutil.normalizePropertyNameFromXML(propertyName); + shareResult[resultPropertyName] = {}; + azureutil.setPropertyValueFromXML(shareResult[resultPropertyName], shareXml[propertyName], propertyName === 'ShareStats'); + } else { + shareResult[propertyName.toLowerCase()] = shareXml[propertyName]; + } + } + } + + return shareResult; +}; + +ShareResult.prototype.getPropertiesFromHeaders = function (headers) { + var self = this; + + var setSharePropertyFromHeaders = function (shareProperty, headerProperty) { + if (!self[shareProperty] && headers[headerProperty.toLowerCase()]) { + self[shareProperty] = headers[headerProperty.toLowerCase()]; + } + }; + + setSharePropertyFromHeaders('etag', HeaderConstants.ETAG); + 
setSharePropertyFromHeaders('lastModified', HeaderConstants.LAST_MODIFIED); + setSharePropertyFromHeaders('requestId', HeaderConstants.REQUEST_ID); + setSharePropertyFromHeaders('quota', HeaderConstants.SHARE_QUOTA); +}; + +/** +* The share ACL settings. +* @typedef {object} ShareAclResult +* @extends {ShareAclResult} +* @property {Object.} signedIdentifiers The container ACL settings. See `[AccessPolicy]{@link AccessPolicy}` for detailed information. +*/ + +module.exports = ShareResult; + +/***/ }), + +/***/ 4438: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var azureCommon = __nccwpck_require__(82187); +var azureutil = azureCommon.util; +var xmlbuilder = azureCommon.xmlbuilder; +var Constants = azureCommon.Constants; + +var HeaderConstants = Constants.HeaderConstants; + +/** +* Creates a new QueueMessageResult object. +* @class +* The QueueMessageResult class is used to store the queue message information. +* +* @property {string} queue The queue name. +* @property {string} messageId The message id. +* @property {string} popReceipt The pop receipt. +* @property {string} messageText The message text. +* @property {string} timeNextVisible The time next visible. +* @property {string} insertionTime The insertion time. +* @property {string} expirationTime The expiration time. 
+* @property {number} dequeueCount The dequeue count. + * +* @constructor +* @param {string} [queue] The queue name. +* @param {string} [messageId] The message id. +* @param {string} [popReceipt] The pop receipt. +*/ +function QueueMessageResult(queue, messageId, popReceipt) { + if (queue) { + this.queue = queue; + } + + if (messageId) { + this.messageId = messageId; + } + + if (popReceipt) { + this.popReceipt = popReceipt; + } +} + +/** +* Builds an XML representation for a queue message +* +* @param {string} messageJs The queue message. +* @param {QueueMessageEncoder} The message encoder. +* @return {string} The XML queue message. +*/ +QueueMessageResult.serialize = function (messageJs, encoder) { + var doc = xmlbuilder.create(Constants.QueueConstants.QUEUE_MESSAGE_ELEMENT, { version: '1.0', encoding: 'utf-8' }); + + if (messageJs) { + var message; + if (encoder !== null && encoder !== undefined) { + message = encoder.encode(messageJs); + } else { + message = messageJs; + } + + doc.ele(Constants.QueueConstants.MESSAGE_TEXT_ELEMENT) + .txt(message) + .up(); + } else { + doc.ele(Constants.QueueConstants.MESSAGE_TEXT_ELEMENT).up(); + } + + return doc.doc().toString(); +}; + + +/** +* Pase the XML representation of a queue message to a QueueMessageResult object. +* +* @param {Object} messageXml The XML representation of the queue message. +* @param {QueueMessageEncoder} The message encoder. +* @return {QueueMessageResult} The QueueMessageResult object. 
+*/ +QueueMessageResult.parse = function (messageXml, encoder) { + var queueMessageResult = new QueueMessageResult(); + for (var property in messageXml) { + if (property === Constants.QueueConstants.MESSAGE_TEXT_ELEMENT) { + if (encoder !== null && encoder !== undefined) { + queueMessageResult.messageText = encoder.decode(messageXml[property]); + } else { + queueMessageResult.messageText = messageXml[property]; + } + } else { + var resultPropertyName = azureutil.normalizePropertyNameFromXML(property); + queueMessageResult[resultPropertyName] = messageXml[property]; + } + } + + // Convert dequeueCount to number + if (queueMessageResult.dequeueCount) { + queueMessageResult.dequeueCount = parseInt(queueMessageResult.dequeueCount); + } + + return queueMessageResult; +}; + +QueueMessageResult.prototype.getPropertiesFromHeaders = function (headers) { + var self = this; + + var setmessagePropertyFromHeaders = function (messageProperty, headerProperty) { + if (!self[messageProperty] && headers[headerProperty.toLowerCase()]) { + self[messageProperty] = headers[headerProperty.toLowerCase()]; + } + }; + + setmessagePropertyFromHeaders('popReceipt', HeaderConstants.POP_RECEIPT); + setmessagePropertyFromHeaders('timeNextVisible', HeaderConstants.TIME_NEXT_VISIBLE); +}; + +module.exports = QueueMessageResult; + +/***/ }), + +/***/ 71928: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var Constants = (__nccwpck_require__(82187).Constants); +var HeaderConstants = Constants.HeaderConstants; + +/** +* Creates a new QueueResult object. +* @class +* The QueueResult class is used to store the queue information. +* +* @property {string} name The queue name. +* @property {object} metadata The metadata key/value pair. +* @property {number} approximateMessageCount The approximate number of messages in the queue. This number is not lower than the actual number of messages in the queue, but could be higher. +* @property {Object.} signedIdentifiers The container ACL settings. See `[AccessPolicy]{@link AccessPolicy}` for detailed information. + * +* @constructor +* @param {string} [name] The queue name. +* @param {string} [metadata] The metadata key/value pair. +*/ +function QueueResult(name, metadata) { + if (name) { + this.name = name; + } + + if (metadata) { + this.metadata = metadata; + } +} + +QueueResult.parse = function (messageXml) { + var queueResult = new QueueResult(); + for (var property in messageXml) { + if (messageXml.hasOwnProperty(property)) { + queueResult[property.toLowerCase()] = messageXml[property]; + } + } + + return queueResult; +}; + +QueueResult.prototype.getPropertiesFromHeaders = function (headers) { + var self = this; + + var setPropertyFromHeaders = function (queueProperty, headerProperty, typeConverterFunc) { + if (!self[queueProperty] && headers[headerProperty.toLowerCase()]) { + if(typeConverterFunc) { + self[queueProperty] = typeConverterFunc(headers[headerProperty.toLowerCase()]); + } else{ + self[queueProperty] = headers[headerProperty.toLowerCase()]; + } + } + }; + + setPropertyFromHeaders('approximateMessageCount', HeaderConstants.APPROXIMATE_MESSAGES_COUNT, parseInt); +}; + +module.exports = QueueResult; + +/***/ }), + +/***/ 23474: +/***/ ((module, __unused_webpack_exports, 
__nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. + +var util = __nccwpck_require__(73837); + +/** + * The interface for classes that represent a encoder which can be used to specify how the queue service encodes and decodes queue messages. + * + * To specify how the queue service encodes and decodes queue messages, set `queueService.messageEncoder` to object of built-in encoder types + * `[TextBase64QueueMessageEncoder]{@link TextBase64QueueMessageEncoder}`, `[BinaryBase64QueueMessageEncoder]{@link BinaryBase64QueueMessageEncoder}`, `[TextXmlQueueMessageEncoder]{@link TextXmlQueueMessageEncoder}`, + * or custom implementation of the QueueMessageEncoder. + * + * @class + */ +function QueueMessageEncoder() { +} + +/** + * Function to encode queue messages. + * + * @param {object} [input] The target to be encoded. + * @return {string} + */ +QueueMessageEncoder.prototype.encode = function(input){ + return input; +}; + +/** + * Function to decode queue messages + * + * @param {string} [textToBeDecoded] The base64 string to be decoded. + * @returns {any} + */ +QueueMessageEncoder.prototype.decode = function(textToBeDecoded){ + return textToBeDecoded; +}; + + +/** + * Create a new TextBase64QueueMessageEncoder object + * @class + * + * Encode from utf-8 string to base64 string + * Decode from base64 string to utf-8 string. 
+ * + * @constructor + * @extends {QueueMessageEncoder} + */ +function TextBase64QueueMessageEncoder(){ +} +util.inherits(TextBase64QueueMessageEncoder, QueueMessageEncoder); + +/** + * Encode from utf-8 string to base64 string + * @this TextBase64QueueMessageEncoder + * + * @param {string} [input] The target to be encoded. + * + * @return {string} + */ +TextBase64QueueMessageEncoder.prototype.encode = function(input){ + return Buffer.from(input, 'utf8').toString('base64'); +}; + +/** + * Decode from base64 string to utf-8 string. + * @this TextBase64QueueMessageEncoder + * + * @param {string} [textToBeDecoded] The base64 string to be decoded. + * + * @return {string} + */ +TextBase64QueueMessageEncoder.prototype.decode = function(textToDecode){ + return Buffer.from(textToDecode, 'base64').toString('utf8'); +}; + + +/** + * Create a new BinaryBase64QueueMessageEncoder object + * @class + * + * Encode from binary buffer to base64 string + * Decode from base64 string to binary buffer. + * + * @constructor + * @extends {QueueMessageEncoder} + */ +function BinaryBase64QueueMessageEncoder(){ +} +util.inherits(BinaryBase64QueueMessageEncoder, QueueMessageEncoder); + +/** + * Encode from binary buffer string to base64 string + * @this BinaryBase64QueueMessageEncoder + * + * @param {Buffer} [input] The target to be encoded. + * + * @return {string} + */ +BinaryBase64QueueMessageEncoder.prototype.encode = function(input){ + return input.toString('base64'); +}; + + +/** + * Decode from base64 string to binary buffer. + * @this BinaryBase64QueueMessageEncoder + * + * @param {string} [textToBeDecoded] The base64 string to be decoded. + * + * @return {Buffer} + */ +BinaryBase64QueueMessageEncoder.prototype.decode = function(textToDecode){ + return Buffer.from(textToDecode, 'base64'); +}; + + +/** + * Create a new TextXmlQueueMessageEncoder object + * @class + * + * Encode utf-8 string by escaping the xml markup characters. 
+ * Decode from utf-8 string by unescaping the xml markup characters. + * + * @constructor + * @extends {QueueMessageEncoder} + */ +function TextXmlQueueMessageEncoder(){ +} +util.inherits(TextXmlQueueMessageEncoder, QueueMessageEncoder); + +/** + * Encode utf-8 string by escaping the xml markup characters. + * @this TextXmlQueueMessageEncoder + * + * @param {string} [input] The target to be encoded. + * + * @return {string} + */ +TextXmlQueueMessageEncoder.prototype.encode = function(input){ + return input.replace(/&/gm, '&') + .replace(//gm, '>') + .replace(/"/gm, '"') + .replace(/'/gm, '''); +}; + +/** + * Decode from utf-8 string by unescaping the xml markup characters. + * @this TextXmlQueueMessageEncoder + * + * @param {string} [textToBeDecoded] The base64 string to be decoded. + * + * @return {string} + */ +TextXmlQueueMessageEncoder.prototype.decode = function(textToDecode){ + return textToDecode.replace(/&/gm, '&') + .replace(/</gm, '<') + .replace(/>/gm, '>') + .replace(/"/gm, '"') + .replace(/'/gm, '\''); +}; + +module.exports = QueueMessageEncoder; +module.exports.TextBase64QueueMessageEncoder = TextBase64QueueMessageEncoder; +module.exports.BinaryBase64QueueMessageEncoder = BinaryBase64QueueMessageEncoder; +module.exports.TextXmlQueueMessageEncoder = TextXmlQueueMessageEncoder; + +/***/ }), + +/***/ 67948: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var util = __nccwpck_require__(73837); +var _ = __nccwpck_require__(7404); +var extend = __nccwpck_require__(99237); + +var azureCommon = __nccwpck_require__(82187); +var azureutil = azureCommon.util; +var SR = azureCommon.SR; +var validate = azureCommon.validate; + +var StorageServiceClient = azureCommon.StorageServiceClient; +var WebResource = azureCommon.WebResource; +var Constants = azureCommon.Constants; +var QueryStringConstants = Constants.QueryStringConstants; +var HeaderConstants = Constants.HeaderConstants; +var RequestLocationMode = Constants.RequestLocationMode; + +// Models requires +var QueueResult = __nccwpck_require__(71928); +var AclResult = azureCommon.AclResult; +var QueueMessageResult = __nccwpck_require__(4438); +var QueueMessageEncoder = __nccwpck_require__(23474); +var ServiceStatsParser = azureCommon.ServiceStatsParser; + +/** +* Creates a new QueueService object. +* If no connection string or storageaccount and storageaccesskey are provided, +* the AZURE_STORAGE_CONNECTION_STRING or AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY environment variables will be used. +* @class +* The QueueService class is used to perform operations on the Microsoft Azure Queue Service. +* +* For more information on using the Queue Service, as well as task focused information on using it from a Node.js application, see +* [How to Use the Queue Service from Node.js](http://azure.microsoft.com/en-us/documentation/articles/storage-nodejs-how-to-use-queues/). +* The following defaults can be set on the Queue service. +* messageEncoder The message encoder to specify how QueueService encodes and decodes the queue message. Default is `[TextXmlQueueMessageEncoder]{@link TextXmlQueueMessageEncoder}`. +* defaultTimeoutIntervalInMs The default timeout interval, in milliseconds, to use for request made via the Queue service. 
+* defaultClientRequestTimeoutInMs The default timeout of client requests, in milliseconds, to use for the request made via the Queue service. +* defaultMaximumExecutionTimeInMs The default maximum execution time across all potential retries, for requests made via the Queue service. +* defaultLocationMode The default location mode for requests made via the Queue service. +* useNagleAlgorithm Determines whether the Nagle algorithm is used for requests made via the Queue service; true to use the +* Nagle algorithm; otherwise, false. The default value is false. +* enableGlobalHttpAgent Determines whether global HTTP(s) agent is enabled; true to use Global HTTP(s) agent; otherwise, false to use +* http(s).Agent({keepAlive:true}). +* @constructor +* @augments {StorageServiceClient} +* +* @param {string} [storageAccountOrConnectionString] The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {string|object} [host] The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {string} [sas] The Shared Access Signature string. +* @param {string} [endpointSuffix] The endpoint suffix. +* @param {TokenCredential} [token] The {@link TokenCredential} object. 
+*/ +function QueueService(storageAccountOrConnectionString, storageAccessKey, host, sas, endpointSuffix, token) { + var storageServiceSettings = StorageServiceClient.getStorageSettings(storageAccountOrConnectionString, storageAccessKey, host, sas, endpointSuffix, token); + + QueueService['super_'].call(this, + storageServiceSettings._name, + storageServiceSettings._key, + storageServiceSettings._queueEndpoint, + storageServiceSettings._usePathStyleUri, + storageServiceSettings._sasToken, + token); + + if (this.anonymous) { + throw new Error(SR.ANONYMOUS_ACCESS_BLOBSERVICE_ONLY); + } + + /** + * @property {boolean} QueueService#messageEncoder + * @defaultvalue {QueueMessageEncoder} `[TextXmlQueueMessageEncoder]{@link TextXmlQueueMessageEncoder}`. + * The message encoder to specify how QueueService encodes and decodes the queue message. Default is `[TextXmlQueueMessageEncoder]{@link TextXmlQueueMessageEncoder}`. + */ + this.messageEncoder = new QueueMessageEncoder.TextXmlQueueMessageEncoder(); +} + +util.inherits(QueueService, StorageServiceClient); + +/** +* Gets the service stats for a storage account’s Queue service. +* +* @this {QueueService} +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise, `[result]{@link ServiceStats}` +* will contain the stats and `response` +* will contain information related to this operation. +*/ +QueueService.prototype.getServiceStats = function (optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getServiceStats', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.get() + .withQueryOption(QueryStringConstants.COMP, 'stats') + .withQueryOption(QueryStringConstants.RESTYPE, 'service'); + + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + + var processResponseCallback = function (responseObject, next) { + responseObject.serviceStatsResult = null; + if (!responseObject.error) { + responseObject.serviceStatsResult = ServiceStatsParser.parse(responseObject.response.body.StorageServiceStats); + } + + // function to be called after all filters + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.serviceStatsResult, returnObject.response); + }; + + // call the first filter + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Gets the properties of a storage account’s Queue service, including Microsoft Azure Storage Analytics. 
+* +* @this {QueueService} +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise, `[result]{@link ServiceProperties}` +* will contain the properties and `response` +* will contain information related to this operation. +*/ +QueueService.prototype.getServiceProperties = function (optionsOrCallback, callback) { + return this.getAccountServiceProperties(optionsOrCallback, callback); +}; + +/** +* Sets the properties of a storage account’s Queue service, including Microsoft Azure Storage Analytics. +* You can also use this operation to set the default request version for all incoming requests that do not have a version specified. +* +* @this {QueueService} +* @param {object} serviceProperties The service properties. +* @param {object} [options] The request options. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise, `response` +* will contain information related to this operation. +*/ +QueueService.prototype.setServiceProperties = function (serviceProperties, optionsOrCallback, callback) { + return this.setAccountServiceProperties(serviceProperties, optionsOrCallback, callback); +}; + +/** +* Lists a segment containing a collection of queue items whose names begin with the specified prefix under the given account. +* +* @this {QueueService} +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {int} [options.maxResults] Specifies the maximum number of queues to return per call to Azure storage. 
This does NOT affect list size returned by this function. (maximum: 5000) +* @param {string} [options.include] Include this parameter to specify that the queue's metadata be returned as part of the response body. (allowed values: '', 'metadata') +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of `[queues]{@link QueueResult}` and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. 
+*/
+QueueService.prototype.listQueuesSegmented = function (currentToken, optionsOrCallback, callback) {
+  this.listQueuesSegmentedWithPrefix(null /* prefix */, currentToken, optionsOrCallback, callback);
+};
+
+/**
+* Lists a segment containing a collection of queue items under the given account.
+*
+* @this {QueueService}
+* @param {string} prefix The prefix of the queue name.
+* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. Results are filtered to return only queues whose name begins with the specified prefix.
+* @param {object} [options] The request options.
+* @param {string} [options.marker] String value that identifies the portion of the list to be returned with the next list operation.
+* @param {int} [options.maxResults] Specifies the maximum number of queues to return per call to Azure storage. This does NOT affect list size returned by this function. (maximum: 5000)
+* @param {string} [options.include] Include this parameter to specify that the queue's metadata be returned as part of the response body. (allowed values: '', 'metadata')
+* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of `[queues]{@link QueueResult}` and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. 
+*/ +QueueService.prototype.listQueuesSegmentedWithPrefix = function (prefix, currentToken, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('listQueuesSegmentedWithPrefix', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.get(); + webResource.withQueryOption(QueryStringConstants.COMP, 'list') + .withQueryOption(QueryStringConstants.MAX_RESULTS, options.maxResults) + .withQueryOption(QueryStringConstants.INCLUDE, options.include) + .withQueryOption(QueryStringConstants.PREFIX, prefix); + + if(!azureutil.objectIsNull(currentToken)) { + webResource.withQueryOption(QueryStringConstants.MARKER, currentToken.nextMarker); + } + + options.requestLocationMode = azureutil.getNextListingLocationMode(currentToken); + + var processResponseCallback = function (responseObject, next) { + responseObject.listQueuesResult = null; + + if (!responseObject.error) { + responseObject.listQueuesResult = { + entries: null, + continuationToken: null + }; + responseObject.listQueuesResult.entries = []; + var queues = []; + + if (responseObject.response.body.EnumerationResults.Queues && responseObject.response.body.EnumerationResults.Queues.Queue) { + queues = responseObject.response.body.EnumerationResults.Queues.Queue; + + if (!_.isArray(queues)) { + queues = [ queues ]; + } + + queues.forEach(function (currentQueue) { + var queueResult = QueueResult.parse(currentQueue); + responseObject.listQueuesResult.entries.push(queueResult); + }); + + if(responseObject.response.body.EnumerationResults.NextMarker) { + responseObject.listQueuesResult.continuationToken = { + nextMarker: null, + targetLocation: null + }; + + responseObject.listQueuesResult.continuationToken.nextMarker = responseObject.response.body.EnumerationResults.NextMarker; + 
responseObject.listQueuesResult.continuationToken.targetLocation = responseObject.targetLocation; + } + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.listQueuesResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Checks to see if a queue exists. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, result, response)} callback `error` will contain information +* if an error occurs; otherwise, `[result]{@link QueueResult}` will contain +* the queue information including `exists` boolean member. +* `response` will contain information related to this operation. 
+*/ +QueueService.prototype.doesQueueExist = function (queue, optionsOrCallback, callback) { + this._doesQueueExist(queue, false, optionsOrCallback, callback); +}; + +/** +* Creates a new queue under the given account. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueResult}` will contain +* the queue information. +* `response` will contain information related to this operation. 
+*/ +QueueService.prototype.createQueue = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createQueue', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(queue); + if (options) { + webResource.addOptionalMetadataHeaders(options.metadata); + } + + var processResponseCallback = function (responseObject, next) { + responseObject.queueResult = null; + if (!responseObject.error) { + responseObject.queueResult = new QueueResult(queue); + if (options && options.metadata) { + responseObject.queueResult.metadata = options.metadata; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.queueResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Creates a new queue under the given account if it doesn't exist. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {object} [options.metadata] The metadata key/value pairs. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueResult}` will contain +* the queue information including `created` boolean member and +* `response` will contain information related to this operation. +* +* @example +* var azure = require('azure-storage'); +* var queueService = azure.createQueueService(); +* queueService.createQueueIfNotExists('taskqueue', function(error) { +* if(!error) { +* // Queue created or exists +* } +* }); +*/ +QueueService.prototype.createQueueIfNotExists = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createQueueIfNotExists', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var self = this; + self._doesQueueExist(queue, true, options, function(error, result, response) { + var exists = result.exists; + result.created = false; + delete result.exists; + + if (error) { + callback(error, result, response); + } else if (exists) { + response.isSuccessful = true; + callback(error, result, response); + } else { + self.createQueue(queue, options, function(createError, responseQueue, createResponse) { + if (!createError) { + responseQueue.created = true; + } + else if (createError && createError.statusCode === 
Constants.HttpConstants.HttpResponseCodes.Conflict && createError.code === Constants.QueueErrorCodeStrings.QUEUE_ALREADY_EXISTS) { + createError = null; + responseQueue.created = false; + createResponse.isSuccessful = true; + } + + callback(createError, responseQueue, createResponse); + }); + } + }); +}; + +/** +* Permanently deletes the specified queue. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information if an error occurs; +* `response` will contain information related to this operation. 
+*/
+QueueService.prototype.deleteQueue = function (queue, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('deleteQueue', function (v) {
+    v.string(queue, 'queue');
+    v.queueNameIsValid(queue);
+    v.callback(callback);
+  });
+
+  var options = extend(true, {}, userOptions);
+  var webResource = WebResource.del(queue);
+  var processResponseCallback = function (responseObject, next) {
+    var finalCallback = function (returnObject) {
+      callback(returnObject.error, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, null, options, processResponseCallback);
+};
+
+/**
+* Permanently deletes the specified queue if it exists.
+*
+* @this {QueueService}
+* @param {string} queue The queue name.
+* @param {object} [options] The request options.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* 'true' if the queue was deleted and 'false' if the queue did not exist. +* `response` will contain information related to this operation. +*/ +QueueService.prototype.deleteQueueIfExists = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteQueueIfExists', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var self = this; + self._doesQueueExist(queue, true, options, function existsCallback(error, existsResult, response) { + if (error) { + callback(error, existsResult.exists, response); + } else if (!existsResult.exists) { + response.isSuccessful = true; + callback(error, false, response); + } else { + self.deleteQueue(queue, options, function(deleteError, deleteResponse) { + var deleted; + if (!deleteError) { + deleted = true; + } else if (deleteError && deleteError.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound && deleteError.code === Constants.QueueErrorCodeStrings.QUEUE_NOT_FOUND) { + deleted = false; + deleteError = null; + deleteResponse.isSuccessful = true; + } + + callback(deleteError, deleted, deleteResponse); + }); + } + }); +}; + +/** +* Returns queue properties, including user-defined metadata. +* **Note** that all metadata names returned from the server will be converted to lower case by NodeJS itself as metadata is set via HTTP headers and HTTP header names are case insensitive. +* +* @this {QueueService} +* @param {string} queue The queue name. 
+* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueResult}` will contain +* the queue information. +* `response` will contain information related to this operation. 
+*/ +QueueService.prototype.getQueueMetadata = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getQueueMetadata', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.get(queue) + .withQueryOption(QueryStringConstants.COMP, 'metadata'); + + options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var self = this; + var processResponseCallback = function (responseObject, next) { + responseObject.queueResult = null; + if (!responseObject.error) { + responseObject.queueResult = new QueueResult(queue); + responseObject.queueResult.metadata = self.parseMetadataHeaders(responseObject.response.headers); + responseObject.queueResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.queueResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Sets user-defined metadata on the specified queue. Metadata is associated with the queue as name-value pairs. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} metadata The metadata key/value pairs. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueResult}` will contain +* the queue information. +* `response` will contain information related to this operation. 
+*/ +QueueService.prototype.setQueueMetadata = function (queue, metadata, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setQueueMetadata', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.put(queue) + .withQueryOption(QueryStringConstants.COMP, 'metadata') + .addOptionalMetadataHeaders(metadata); + + var processResponseCallback = function (responseObject, next) { + responseObject.queueResult = null; + if (!responseObject.error) { + responseObject.queueResult = new QueueResult(queue, metadata); + responseObject.queueResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.queueResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Adds a new message to the back of the message queue. +* The encoded message can be up to 64KB in size for versions 2011-08-18 and newer, or 8KB in size for previous versions. +* Unencoded messages must be in a format that can be included in an XML request with UTF-8 encoding. +* Queue messages are encoded using the `[TextXmlQueueMessageEncoder]{@link TextXmlQueueMessageEncoder}`. See queueService.messageEncoder to set encoder defaults. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {string|Buffer} messageText The message text. +* @param {object} [options] The request options. +* @param {int} [options.messageTimeToLive] The time-to-live interval for the message, in seconds. The maximum time-to-live allowed is 7 days. 
If this parameter is omitted, the default time-to-live is 7 days +* @param {int} [options.visibilityTimeout] Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message cannot be set to a value later than the expiry time. visibilitytimeout should be set to a value smaller than the time-to-live value. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueMessageResult}` will contain +* the message. +* `response` will contain information related to this operation. 
+* +* @example +* var azure = require('azure-storage'); +* var queueService = azure.createQueueService(); +* queueService.createMessage('taskqueue', 'Hello world!', function(error) { +* if(!error) { +* // Message inserted +* } +* }); +*/ +QueueService.prototype.createMessage = function (queue, messageText, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createMessage', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var xmlMessageDescriptor = QueueMessageResult.serialize(messageText, this.messageEncoder); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.post(queue + '/messages') + .withHeader(HeaderConstants.CONTENT_TYPE, 'application/atom+xml;charset="utf-8"') + .withHeader(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(xmlMessageDescriptor, 'utf8')) + .withQueryOption(QueryStringConstants.MESSAGE_TTL, options.messageTimeToLive) + .withQueryOption(QueryStringConstants.VISIBILITY_TIMEOUT, options.visibilityTimeout) + .withBody(xmlMessageDescriptor); + + var messageEncoder = this.messageEncoder; + + var processResponseCallback = function (responseObject, next) { + responseObject.queueMessageResults = []; + + if (responseObject.response && responseObject.response.body && responseObject.response.body.QueueMessagesList && responseObject.response.body.QueueMessagesList.QueueMessage) { + var messages = responseObject.response.body.QueueMessagesList.QueueMessage; + + if (!_.isArray(messages)) { + messages = [ messages ]; + } + + messages.forEach(function (message) { + var queueMessageResult = QueueMessageResult.parse(message, messageEncoder); + responseObject.queueMessageResults.push(queueMessageResult); + }); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, responseObject.queueMessageResults[0], 
returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, webResource.body, options, processResponseCallback); +}; + +/** +* Retrieve messages from the queue and makes them invisible to other consumers. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {int} [options.numOfMessages] A nonzero integer value that specifies the number of messages to retrieve from the queue, up to a maximum of 32. By default, a single message is retrieved from the queue with this operation. +* @param {int} [options.visibilityTimeout] Required if not peek only. Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message can be set to a value later than the expiry time. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. 
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* a list of `[messages]{@link QueueMessageResult}`. +* `response` will contain information related to this operation. +* +* @example +* var azure = require('azure-storage'); +* var queueService = azure.createQueueService(); +* var queueName = 'taskqueue'; +* queueService.getMessages(queueName, function(error, serverMessages) { +* if(!error) { +* // Process the message in less than 30 seconds, the message +* // text is available in serverMessages[0].messagetext +* queueService.deleteMessage(queueName, serverMessages[0].messageId, serverMessages[0].popReceipt, function(error) { +* if(!error){ +* // Message deleted +* } +* }); +* } +* }); +*/ +QueueService.prototype.getMessages = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getMessages', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + delete options.peekOnly; + + this._getOrPeekMessages(queue, options, callback); +}; + +/** +* Retrieves a message from the queue and makes it invisible to other consumers. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {int} [options.visibilityTimeout] Required if not peek only. Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message can be set to a value later than the expiry time. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueMessageResult}` will contain +* the message. +* `response` will contain information related to this operation. 
+* +* @example +* var azure = require('azure-storage'); +* var queueService = azure.createQueueService(); +* var queueName = 'taskqueue'; +* queueService.getMessage(queueName, function(error, serverMessage) { +* if(!error) { +* // Process the message in less than 30 seconds, the message +* // text is available in serverMessage.messagetext +* queueService.deleteMessage(queueName, serverMessage.messageId, serverMessage.popReceipt, function(error) { +* if(!error){ +* // Message deleted +* } +* }); +* } +* }); +*/ +QueueService.prototype.getMessage = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getMessage', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.numOfMessages = 1; + + var finalCallback = function(error, messages, response){ + var message; + if(messages && messages.length > 0){ + message = messages[0]; + } + + callback(error, message, response); + }; + + this.getMessages(queue, options, finalCallback); +}; + +/** +* Retrieves messages from the front of the queue, without changing the messages visibility. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {int} [options.numOfMessages] A nonzero integer value that specifies the number of messages to retrieve from the queue, up to a maximum of 32. By default, a single message is retrieved from the queue with this operation. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `result` will contain +* `[messages]{@link QueueMessageResult}`. +* `response` will contain information related to this operation. +*/ +QueueService.prototype.peekMessages = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('peekMessages', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.peekOnly = true; + delete options.visibilityTimeout; + + this._getOrPeekMessages(queue, options, callback); +}; + +/** +* Retrieves a message from the front of the queue, without changing the message visibility. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueMessageResult}` will contain +* the message. +* `response` will contain information related to this operation. +*/ +QueueService.prototype.peekMessage = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('peekMessage', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.numOfMessages = 1; + + var finalCallback = function(error, messages, response){ + var message; + if(messages && messages.length > 0){ + message = messages[0]; + } + + callback(error, message, response); + }; + + this.peekMessages(queue, options, finalCallback); +}; + +/** +* Deletes a specified message from the queue. +* +* @this {QueueService} +* @param {string} queue The queue name. 
+* @param {string} messageId The message identifier of the message to delete. +* @param {string} popReceipt A valid pop receipt value returned from an earlier call to the Get Messages or Update Message operation +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information if an error occurs; +* `response` will contain information related to this operation. 
+*/ +QueueService.prototype.deleteMessage = function (queue, messageId, popReceipt, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteMessage', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + if (azureutil.objectIsNull(popReceipt)) { + throw new Error(SR.INVALID_POP_RECEIPT); + } + + if (azureutil.objectIsNull(messageId)) { + throw new Error(SR.INVALID_MESSAGE_ID); + } + + var options = extend(true, {}, userOptions); + var webResource = WebResource.del(queue + '/messages/' + messageId) + .withQueryOption(QueryStringConstants.POP_RECEIPT, popReceipt, null, true); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Clears all messages from the queue. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information +* if an error occurs; otherwise +* `response` will contain information related to this operation. +*/ +QueueService.prototype.clearMessages = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('clearMessages', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.del(queue + '/messages'); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Updates the visibility timeout of a message. You can also use this operation to update the contents of a message. +* A message must be in a format that can be included in an XML request with UTF-8 encoding, and the encoded message can be up to 64KB in size. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {string} messageId The message identifier of the message to update. 
+* @param {string} popReceipt A valid pop receipt value returned from an earlier call to the Get Messages or Update Message operation +* @param {int} visibilityTimeout Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message can be set to a value later than the expiry time. +* @param {object} [options] The request options. +* @param {object} [options.messageText] The new message text. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueMessageResult}` will contain +* the message result information. +* `response` will contain information related to this operation. 
+*/ +QueueService.prototype.updateMessage = function (queue, messageId, popReceipt, visibilityTimeout, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('updateMessage', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + if (azureutil.objectIsNull(popReceipt)) { + throw new Error(SR.INVALID_POP_RECEIPT); + } + + if (azureutil.objectIsNull(messageId)) { + throw new Error(SR.INVALID_MESSAGE_ID); + } + + var options = extend(true, {}, userOptions); + var content = null; + if (options.messageText) { + content = QueueMessageResult.serialize(options.messageText, this.messageEncoder); + } + + var contentLength = content ? Buffer.byteLength(content, 'utf8') : 0; + + var webResource = WebResource.put(queue + '/messages/' + messageId) + .withHeader(HeaderConstants.CONTENT_TYPE, 'application/atom+xml;charset="utf-8"') + .withHeader(HeaderConstants.CONTENT_LENGTH, contentLength) + .withQueryOption(QueryStringConstants.POP_RECEIPT, popReceipt, null, true) + .withQueryOption(QueryStringConstants.VISIBILITY_TIMEOUT, visibilityTimeout) + .withBody(content); + + var processResponseCallback = function (responseObject, next) { + responseObject.queueMessageResult = null; + if (!responseObject.error) { + responseObject.queueMessageResult = new QueueMessageResult(queue, messageId); + responseObject.queueMessageResult.getPropertiesFromHeaders(responseObject.response.headers); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.queueMessageResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, webResource.body, options, processResponseCallback); +}; + +/** +* Gets the queue's ACL. +* +* @this {QueueService} +* @param {string} queue The queue name. 
+* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueResult}` will contain +* information for the queue. +* `response` will contain information related to this operation. 
+*/ +QueueService.prototype.getQueueAcl = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getQueueAcl', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.get(queue) + .withQueryOption(QueryStringConstants.COMP, 'acl'); + + options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var processResponseCallback = function (responseObject, next) { + responseObject.queueResult = null; + if (!responseObject.error) { + responseObject.queueResult = new QueueResult(queue); + responseObject.queueResult.getPropertiesFromHeaders(responseObject.response.headers); + responseObject.queueResult.signedIdentifiers = AclResult.parse(responseObject.response.body); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.queueResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Updates the queue's ACL. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @property {Object.} signedIdentifiers The container ACL settings. See `[AccessPolicy]{@link AccessPolicy}` for detailed information. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information +* if an error occurs; otherwise `[result]{@link QueueResult}` will contain +* information for the queue. +* `response` will contain information related to this operation. +* @example +* var azure = require('azure-storage'); +* var SharedAccessPermissions = azure.QueueUtilities.SharedAccessPermissions; +* var queueService = azure.createQueueService(); +* var sharedAccessPolicies = [ +* {AccessPolicy: { +* Permissions: PROCESS, +* Start: startDate, +* Expiry: expiryDate +* }, +* Id: processOnly, +* }, +* {AccessPolicy: { +* Permissions: SharedAccessPermissions.PROCESS + SharedAccessPermissions.DELETE, +* Start: startDate, +* Expiry: expiryDate +* }, +* Id: processAndDelete, +* }]; +* +* queueService.setQueueAcl(queueName, sharedAccessPolicies, function(error, queueResult, response) { +* // do whatever +* }); +*/ +QueueService.prototype.setQueueAcl = function (queue, signedIdentifiers, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setQueueAcl', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var 
policies = null; + if (signedIdentifiers) { + if(_.isArray(signedIdentifiers)) { + throw new TypeError(SR.INVALID_SIGNED_IDENTIFIERS); + } + policies = AclResult.serialize(signedIdentifiers); + } + + var webResource = WebResource.put(queue) + .withQueryOption(QueryStringConstants.COMP, 'acl') + .withHeader(HeaderConstants.CONTENT_LENGTH, !azureutil.objectIsNull(policies) ? Buffer.byteLength(policies) : 0) + .withBody(policies); + + var processResponseCallback = function (responseObject, next) { + responseObject.containerResult = null; + if (!responseObject.error) { + responseObject.queueResult = new QueueResult(queue); + responseObject.queueResult.getPropertiesFromHeaders(responseObject.response.headers); + if (signedIdentifiers) { + responseObject.queueResult.signedIdentifiers = signedIdentifiers; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.queueResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, webResource.body, options, processResponseCallback); +}; + +/** +* Retrieves a shared access signature token. +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {object} sharedAccessPolicy The shared access policy. +* @param {string} [sharedAccessPolicy.Id] The signed identifier. +* @param {object} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type. +* @param {date|string} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid (The UTC value will be used). +* @param {date|string} [sharedAccessPolicy.AccessPolicy.Expiry] The time at which the Shared Access Signature becomes expired (The UTC value will be used). +* @param {string} [sharedAccessPolicy.AccessPolicy.IPAddressOrRange] An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive. 
+* @param {string} sharedAccessPolicy.AccessPolicy.Protocols The protocols permitted for a request made with the account SAS. +* Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http. +* @return {string} The shared access signature query string. Note this string does not contain the leading "?". +*/ +QueueService.prototype.generateSharedAccessSignature = function (queue, sharedAccessPolicy) { + // check if the QueueService is able to generate a shared access signature + if (!this.storageCredentials || !this.storageCredentials.generateSignedQueryString) { + throw new Error(SR.CANNOT_CREATE_SAS_WITHOUT_ACCOUNT_KEY); + } + + validate.validateArgs('generateSharedAccessSignature', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.object(sharedAccessPolicy, 'sharedAccessPolicy'); + }); + + return this.storageCredentials.generateSignedQueryString(Constants.ServiceType.Queue, queue, sharedAccessPolicy, null); +}; + +/** +* Checks to see if a queue exists. +* @ignore +* +* @this {QueueService} +* @param {string} queue The queue name. +* @param {string} primaryOnly If true, the request will be executed against the primary storage location. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, result, response)} callback `error` will contain information +* if an error occurs; otherwise, `result` will contain +* the queue information including `exists` boolean member +* and `response` will contain information related to this operation. +* +*/ +QueueService.prototype._doesQueueExist = function (queue, primaryOnly, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('doesQueueExist', function(v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + var webResource = WebResource.head(queue) + .withQueryOption(QueryStringConstants.COMP, 'metadata'); + + if(primaryOnly === false) { + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + } + + var processResponseCallback = function(responseObject, next) { + responseObject.queueResult = new QueueResult(queue); + responseObject.queueResult.exists = false; + + if (!responseObject.error) { + responseObject.queueResult.exists = true; + responseObject.queueResult.getPropertiesFromHeaders(responseObject.response.headers); + + } else if (responseObject.error && responseObject.error.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound) { + responseObject.error = null; + responseObject.queueResult.exists = false; + responseObject.response.isSuccessful = true; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, 
returnObject.queueResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** + * @ignore + */ +QueueService.prototype._getOrPeekMessages = function (queue, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('_getOrPeekMessages', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + if (!options.numOfMessages) { + options.numOfMessages = 1; + } + + var webResource = WebResource.get(queue + '/messages') + .withQueryOption(QueryStringConstants.NUM_OF_MESSAGES, options.numOfMessages) + .withQueryOption(QueryStringConstants.VISIBILITY_TIMEOUT, options.visibilityTimeout) + .withQueryOption(QueryStringConstants.PEEK_ONLY, options.peekOnly); + + if (options.peekOnly) { + // For peek message, it's a read-only action and can be performed against secondary endpoint. 
+ options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + } + + + var messageEncoder = this.messageEncoder; + var processResponseCallback = function (responseObject, next) { + responseObject.queueMessageResults = null; + + if (!responseObject.error) { + responseObject.queueMessageResults = []; + + if (responseObject.response.body.QueueMessagesList && responseObject.response.body.QueueMessagesList.QueueMessage) { + var messages = responseObject.response.body.QueueMessagesList.QueueMessage; + + if (!_.isArray(messages)) { + messages = [ messages ]; + } + + messages.forEach(function (message) { + var queueMessageResult = QueueMessageResult.parse(message, messageEncoder); + responseObject.queueMessageResults.push(queueMessageResult); + }); + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.queueMessageResults, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Retrieves a queue URL. +* +* @param {string} queue The queue name. +* @param {string} [sasToken] The Shared Access Signature token. +* @param {boolean} [primary] A boolean representing whether to use the primary or the secondary endpoint. +* @return {string} The formatted URL string. 
+* @example +* var azure = require('azure-storage'); +* var queueService = azure.createQueueService(); +* var sharedAccessPolicy = { +* AccessPolicy: { +* Permissions: azure.QueueUtilities.SharedAccessPermissions.READ, +* Start: startDate, +* Expiry: expiryDate +* }, +* }; +* +* var sasToken = queueService.generateSharedAccessSignature(queue, sharedAccessPolicy); +* var sasUrl = queueService.getUrl(queue, sasToken); +*/ +QueueService.prototype.getUrl = function (queue, sasToken, primary) { + validate.validateArgs('getUrl', function (v) { + v.string(queue, 'queue'); + v.queueNameIsValid(queue); + }); + + return this._getUrl(queue, sasToken, primary); +}; + +module.exports = QueueService; + + +/***/ }), + +/***/ 98801: +/***/ ((module, exports) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Expose 'QueueUtilities'. +exports = module.exports; + +/** +* Defines enums for use with the Queue service. +* @namespace QueueUtilities +*/ +var QueueUtilities = { + /** + * Permission types. + * + * @const + * @enum {string} + */ + SharedAccessPermissions: { + READ: 'r', + ADD: 'a', + UPDATE: 'u', + PROCESS: 'p' + } +}; + +module.exports = QueueUtilities; + +/***/ }), + +/***/ 63346: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var _ = __nccwpck_require__(7404); +var util = __nccwpck_require__(73837); +var guid = __nccwpck_require__(48073); + +var azureCommon = __nccwpck_require__(82187); +var azureutil = azureCommon.util; +var SR = azureCommon.SR; + +var TableUtilities = __nccwpck_require__(581); +var EdmType = TableUtilities.EdmType; + +/** +* Get the Edm type of an object. +* +* @param {object} value A typed instance. +* @return {string} The Edm type. +*/ +exports.propertyType = function (value, guessNumberType) { + if (_.isNumber(value)) { + if (guessNumberType) { + if (azureutil.objectIsInt(value)) { + return 'Edm.Int32'; + } else { + return 'Edm.Double'; + } + } else { + return null; + } + } else if (_.isBoolean(value)) { + return 'Edm.Boolean'; + } else if (_.isDate(value)) { + return 'Edm.DateTime'; + } else { + return 'Edm.String'; + } +}; + +/** +* Convert a JSON value from over the wire into the correct EDM type. +* +* Note that Int64, is remaining a string. Converting it to a Number would lose precision. +* Int32, Boolean, and Double should already be the correct non-string types +* +* @param {string} type The type of the value as it appears in the type attribute. +* @param value The value in JSON format. +* @return {object} The unserialized value. 
+*/ +exports.deserializeValueFromJson = function (type, value) { + if (type) { + switch (type) { + case EdmType.BINARY: + return Buffer.from(value, 'base64'); + case EdmType.DATETIME: + return new Date(value); + case EdmType.GUID: + return value; + case EdmType.DOUBLE: + // Account for Infinity and NaN: + if (typeof value !== 'number') { + return parseFloat(value); + } + return value; + case EdmType.INT32: + case EdmType.INT64: + case EdmType.STRING: + case EdmType.BOOLEAN: + return value; + default: + throw new Error(util.format(SR.TYPE_NOT_SUPPORTED, type)); + } + } else { + return value; + } +}; + +/** +* Convert a raw EdmType value into the JSON value expected to be sent over the wire. +* +* TODO: validate correct input types? +* Expects Edm.Int64 and Edm.String to be string, Edm.Double and Edm.Int32 to be Number, +* Edm.Guid to be an array or buffer compatible with Node.uuid, Edm.Binary to be a Node Buffer, Edm.DateTime to be a Date, +* and Edm.Boolean to be a boolean. +* +* @param {string} type The type of the value as it will appear in the type attribute. +* @param {string} value The value +* @return {object} The serialized value. 
+*/ +exports.serializeValue = function (type, value) { + switch (type) { + case EdmType.BINARY: + if (Buffer.isBuffer(value)) { + return value.toString('base64'); + } + return value; + case EdmType.DATETIME: + if (_.isDate(value)) { + return value.toISOString(); + } + return value; + case EdmType.GUID: + if (Buffer.isBuffer(value) || _.isArray(value)) { + return guid.unparse(value); + } + return value; + case EdmType.INT64: + case EdmType.DOUBLE: + return value.toString(); + case EdmType.INT32: + if (value === Number.POSITIVE_INFINITY) { + return 'Infinity'; + } + if (value === Number.NEGATIVE_INFINITY) { + return '-Infinity'; + } + if (azureutil.objectIsNaN(value)) { + return 'NaN'; + } + return value; + case EdmType.STRING: + case EdmType.BOOLEAN: + return value; + default: + throw new Error(SR.TYPE_NOT_SUPPORTED + type); + } +}; + +/* +* Determines if a type annotation is required for the input type when sending JSON data to the service. +*/ +exports.isTypeRequired = function(type, value) { + switch (type) { + case EdmType.BINARY: + case EdmType.INT64: + case EdmType.DATETIME: + case EdmType.GUID: + case EdmType.DOUBLE: + return true; + case EdmType.INT32: + if (typeof value !== 'number' || value === Number.POSITIVE_INFINITY || value === Number.NEGATIVE_INFINITY || (azureutil.objectIsNaN(value))) { + return true; + } + return false; + case EdmType.STRING: + case EdmType.BOOLEAN: + return false; + default: + throw new Error(util.format(SR.TYPE_NOT_SUPPORTED, type)); + } +}; + +/** +* Serializes value into proper value to be used in odata query value. +* +* @param {object} value The value to be serialized. +* @return {string} The serialized value. +*/ +exports.serializeQueryValue = function (value, type) { + var edmType = type || exports.propertyType(value, true); + switch (edmType) { + case EdmType.INT32: + return value.toString(); + case EdmType.BOOLEAN: + return value ? 
'true' : 'false'; + case EdmType.DOUBLE: + return value.toString(); + case EdmType.INT64: + return value.toString() + 'L'; + case EdmType.DATETIME: + if(_.isDate(value)) { + var dateTimeString = value.toISOString(); + return 'datetime\'' + dateTimeString + '\''; + } + throw new Error(util.format(SR.INVALID_EDM_TYPE, value, type)); + case EdmType.GUID: + return 'guid\'' + value.toString() + '\''; + case EdmType.BINARY: + return 'X\'' + value.toString('hex') + '\''; + default: + return '\'' + value.toString().replace(/'/g, '\'\'') + '\''; + } +}; + +/***/ }), + +/***/ 8678: +/***/ ((module, exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. 
+var util = __nccwpck_require__(73837); + +var azureCommon = __nccwpck_require__(82187); +var azureutil = azureCommon.util; +var SR = azureCommon.SR; +var Constants = azureCommon.Constants; +var edmHandler = __nccwpck_require__(63346); + +var prefixLength = Constants.TableConstants.ODATA_PREFIX.length; +var suffixLength = Constants.TableConstants.ODATA_TYPE_SUFFIX.length; + +exports = module.exports; + +/* Serialize an entity to an Odata (Json based) payload +* Input must be in the following format: +* { stringValue: { '$': 'Edm.String', '_': 'my string' }, myInt: { '$': 'Edm.Int32', '_': 3 } } +*/ +exports.serializeJson = function (entity) { + function normalizeEntityProperty(property) { + if(azureutil.objectIsNull(property)) { + return { _: property }; + } + + if (typeof property === 'object' && property.hasOwnProperty(Constants.TableConstants.ODATA_VALUE_MARKER)) { + return property; + } + + var result = { _: property }; + result[Constants.TableConstants.ODATA_TYPE_MARKER] = edmHandler.propertyType(property, true); + + return result; + } + + var result = {}; + for (var propName in entity) { + // ignore if .metadata or null or undefined + if (propName !== Constants.TableConstants.ODATA_METADATA_MARKER) { + var property = normalizeEntityProperty(entity[propName]); + if (!azureutil.objectIsNull(property[Constants.TableConstants.ODATA_VALUE_MARKER])) { + var value = property[Constants.TableConstants.ODATA_VALUE_MARKER]; + var type = property[Constants.TableConstants.ODATA_TYPE_MARKER]; + + if (type === undefined) { + type = edmHandler.propertyType(value, true); + } + + result[propName] = edmHandler.serializeValue(type, value); + if (edmHandler.isTypeRequired(type, value)) { + result[propName + Constants.TableConstants.ODATA_TYPE_SUFFIX] = type; + } + } + } + } + + var replacer = function(key, value) { + if (value === Number.POSITIVE_INFINITY) { + return 'Infinity'; + } + if (value === Number.NEGATIVE_INFINITY) { + return '-Infinity'; + } + if 
(azureutil.objectIsNaN(value)) { + return 'NaN'; + } + return value; + }; + + return JSON.stringify(result, replacer); +}; + +/* +Input: The body of the HTTP response from the server from a table list as JSON (responseObject.response.body). + +Return: +This will return an array in the following format: + +[ + tableName1, + tableName2 +] + +For example, + +[ + myTable1, + myTable2 +] + +*/ +exports.parseJsonTables = function (response) { + var result = []; + + if (response.value) { + for (var i = 0; i < response.value.length; i++) { + var entity = response.value[i].TableName; + result.push(entity); + } + } + + return result; +}; + +/* +Input: The body of the HTTP response from the server from a table query as JSON (responseObject.response.body). + +Return: +This will return an array in the following format: + +[ + {{ '$': edmHandler1, '_': value1}, { '$': edmHandler2, '_': value2}, { '$': edmHandler3, '_': value3}}, + {{ '$': edmHandler4, '_': value4}, { '$': edmHandler5, '_': value5}, { '$': edmHandler6, '_': value6}} +] + +For example, + +[ + {{ '$': Edm.Int32, '_': 42}, { '$': Edm.String, '_': 'sample string'}, { '$': Edm.Boolean, '_': false}}, + {{ '$': Edm.Int64, '_': 42}, { '$': Edm.String, '_': 'sample string 2'}, { '$': Edm.Boolean, '_': true}} +] + +*/ +exports.parseJsonEntities = function (response, autoResolveProperties, propertyResolver, entityResolver) { + if (!response.value) { + return [exports.parseJsonSingleEntity(response, autoResolveProperties, propertyResolver, entityResolver)]; + } else { + var result = []; + + for (var i = 0; i < response.value.length; i++) { + var rawEntity = response.value[i]; + var entity = exports.parseJsonSingleEntity(rawEntity, autoResolveProperties, propertyResolver, entityResolver); + result.push(entity); + } + + return result; + } +}; + +exports.parseJsonSingleEntity = function(rawEntity, autoResolveProperties, propertyResolver, entityResolver) { + var rawEntityProperties = {}; + var entityPropertyTypes = 
{PartitionKey: 'Edm.String', RowKey: 'Edm.String', Timestamp: 'Edm.DateTime'}; + var odataMetadata = {}; + + // parse properties + for (var entityPropertyName in rawEntity) { + if (azureutil.stringStartsWith(entityPropertyName, Constants.TableConstants.ODATA_PREFIX)) { + odataMetadata[entityPropertyName.slice(prefixLength)] = rawEntity[entityPropertyName]; + } else if (azureutil.stringEndsWith(entityPropertyName, Constants.TableConstants.ODATA_TYPE_SUFFIX)) { + entityPropertyTypes[entityPropertyName.slice(0, entityPropertyName.length - suffixLength)] = rawEntity[entityPropertyName]; + } else { + rawEntityProperties[entityPropertyName] = rawEntity[entityPropertyName]; + } + } + + // make sure etag is set + if (!odataMetadata.etag && rawEntityProperties.Timestamp) { + var timestampString = Buffer.from(rawEntityProperties.Timestamp).toString(); + odataMetadata.etag = 'W/"datetime\'' + timestampString + '\'"'; + } + + var entity = {}; + for (var entityPropertyName in rawEntityProperties) { + if (rawEntityProperties.hasOwnProperty(entityPropertyName)) { + // set the type, if given in the response + var entityPropertyType = entityPropertyTypes[entityPropertyName]; + entity[entityPropertyName] = {}; + + // use the given property resolver if present, otherwise infer type if undefined + if (propertyResolver) { + // partition key, row key, name, value, type if present + entityPropertyType = propertyResolver(rawEntityProperties.PartitionKey, rawEntityProperties.RowKey, entityPropertyName, rawEntityProperties[entityPropertyName], entityPropertyType); + } + if (!entityPropertyType && autoResolveProperties) { + entityPropertyType = edmHandler.propertyType(rawEntityProperties[entityPropertyName], false); + } + + if (entityPropertyType) { + entity[entityPropertyName][Constants.TableConstants.ODATA_TYPE_MARKER] = entityPropertyType; + } + + try { + entity[entityPropertyName][Constants.TableConstants.ODATA_VALUE_MARKER] = edmHandler.deserializeValueFromJson(entityPropertyType, 
rawEntityProperties[entityPropertyName]); + } catch (err) { + if (propertyResolver) { + // if a property resolver was used and the type is invalid, throw an appropriate error + throw new Error(util.format(SR.INVALID_PROPERTY_RESOLVER, entityPropertyName, entityPropertyType, rawEntityProperties[entityPropertyName])); + } else { + throw err; + } + } + } + } + + entity[Constants.TableConstants.ODATA_METADATA_MARKER] = odataMetadata; + + if (entityResolver) { + entity = entityResolver(entity); + } + + return entity; +}; + + +/***/ }), + +/***/ 2600: +/***/ ((module, exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +var util = __nccwpck_require__(73837); +var azureCommon = __nccwpck_require__(82187); +var WebResource = azureCommon.WebResource; +var SR = azureCommon.SR; +var Constants = azureCommon.Constants; +var HeaderConstants = Constants.HeaderConstants; +var TableConstants = Constants.TableConstants; +var entityResult = __nccwpck_require__(255); + +exports = module.exports; + +/** +* Retrieves the entity path from the table name and an entity descriptor. +* @ignore +* +* @param {string} table The table name. +* @param {object} entity The entity descriptor. +* @return {string} The entity path. 
+*/ +function getEntityPath (tableName, partitionKey, rowKey) { + var path = '/' + tableName; + + if (typeof (partitionKey) === 'string' && typeof (rowKey) === 'string') { + // Escape single quotes according to OData Protocol Specification: "single quotes within string literals are represented as two consecutive single quotes". + partitionKey = partitionKey.replace(/'/g, '\'\''); + rowKey = rowKey.replace(/'/g, '\'\''); + path = path + '(PartitionKey=\'' + encodeURIComponent(partitionKey.toString('utf8')) + '\',RowKey=\'' + encodeURIComponent(rowKey.toString('utf8')) + '\')'; + } else { + throw new Error(SR.INCORRECT_ENTITY_KEYS); + } + + return path; +} + +/** +* Constructs the web resource for a table operation. +* +* @param {string} operation The operation to perform. +* @param {string} table The table name. +* @param {object} entityDescriptor The entity descriptor. +* @param {object} [options] The create options or callback function. +* @param {boolean} [options.checkEtag] Boolean value indicating weather the etag should be matched or not. +* @param {string} [options.echoContent] Whether or not to return the entity upon a successful insert. Default to false. +* @param {string} [options.payloadFormat] The payload format to use for the request. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @return {webResource} +*/ +exports.constructEntityWebResource = function (operation, table, entityDescriptor, options) { + var webResource = null; + if (operation === TableConstants.Operations.INSERT) { + webResource = WebResource.post(table) + .withHeader(HeaderConstants.PREFER, options.echoContent ? HeaderConstants.PREFER_CONTENT : HeaderConstants.PREFER_NO_CONTENT); + } else { + var partitionKey; + var rowKey; + + if (typeof (entityDescriptor.PartitionKey) === 'string') { + partitionKey = entityDescriptor.PartitionKey; + } else { + partitionKey = entityDescriptor.PartitionKey[TableConstants.ODATA_VALUE_MARKER]; + } + + if (typeof (entityDescriptor.RowKey) === 'string') { + rowKey = entityDescriptor.RowKey; + } else { + rowKey = entityDescriptor.RowKey[TableConstants.ODATA_VALUE_MARKER]; + } + + var path = getEntityPath(table, partitionKey, rowKey); + + if (operation === TableConstants.Operations.DELETE) { + webResource = WebResource.del(path); + } else if (operation === TableConstants.Operations.MERGE || operation === TableConstants.Operations.INSERT_OR_MERGE) { + webResource = WebResource.merge(path); + } else if (operation === TableConstants.Operations.REPLACE || operation === TableConstants.Operations.INSERT_OR_REPLACE) { + webResource = WebResource.put(path); + } else if (operation === TableConstants.Operations.RETRIEVE) { + webResource = WebResource.get(path); + } else { + throw new Error(util.format(SR.INVALID_TABLE_OPERATION, operation)); + } + } + + if (operation === TableConstants.Operations.DELETE || operation === TableConstants.Operations.REPLACE || operation === TableConstants.Operations.MERGE) { + webResource.withHeader(HeaderConstants.IF_MATCH, entityResult.getEtag(entityDescriptor) || '*'); + } + + var entitySerializedDescriptor; + if (!(operation === TableConstants.Operations.DELETE || operation === 
TableConstants.Operations.RETRIEVE)) { + entitySerializedDescriptor = entityResult.serialize(entityDescriptor); + } + + exports.setTableRequestHeadersAndBody(webResource, entitySerializedDescriptor, options.payloadFormat); + + return webResource; +}; + +/** +* Sets the table request headers. +* +* @param {string} webResource The webResource to add headers to. +* @param {object} [body] The body of the request. +*/ +exports.setTableRequestHeadersAndBody = function (webResource, body, acceptType) { + if (body) { + webResource.withHeader(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(body, 'utf8')) + .withBody(body) + .withHeader(HeaderConstants.CONTENT_TYPE, HeaderConstants.JSON_CONTENT_TYPE_VALUE); + } + + webResource.withHeader(HeaderConstants.ACCEPT, acceptType) + .withHeader(HeaderConstants.MAX_DATA_SERVICE_VERSION, TableConstants.DEFAULT_DATA_SERVICE_VERSION); +}; + +/***/ }), + +/***/ 92260: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. 
+var util = __nccwpck_require__(73837); +var azureCommon = __nccwpck_require__(82187); +var SharedKey = azureCommon.SharedKey; +var azureutil = azureCommon.util; +var Constants = azureCommon.Constants; +var HeaderConstants = Constants.HeaderConstants; +var QueryStringConstants = Constants.QueryStringConstants; + +/** +* Creates a new SharedKeyTable object. +* +* @constructor +* @param {string} storageAccount The storage account. +* @param {string} storageAccessKey The storage account's access key. +* @param {bool} usePathStyleUri Boolean value indicating if the path, or the hostname, should include the storage account. +*/ +function SharedKeyTable(storageAccount, storageAccessKey, usePathStyleUri) { + SharedKeyTable['super_'].call(this, + storageAccount, + storageAccessKey, + usePathStyleUri); +} + +util.inherits(SharedKeyTable, SharedKey); + +/** +* Signs a request with the Authentication header. +* +* @param {WebResource} The webresource to be signed. +* @param {function(error)} callback The callback function. +*/ +SharedKeyTable.prototype.signRequest = function (webResource, callback) { + var getvalueToAppend = function (value) { + if (azureutil.objectIsNull(value)) { + return '\n'; + } else { + return value + '\n'; + } + }; + + var stringToSign = + webResource.method + '\n' + + getvalueToAppend(webResource.headers[HeaderConstants.CONTENT_MD5]) + + getvalueToAppend(webResource.headers[HeaderConstants.CONTENT_TYPE]) + + getvalueToAppend(webResource.headers[HeaderConstants.MS_DATE]) + + this._getCanonicalizedResource(webResource); + + var signature = this.signer.sign(stringToSign); + + webResource.withHeader(HeaderConstants.AUTHORIZATION, 'SharedKey ' + this.storageAccount + ':' + signature); + callback(null); +}; + +/* +* Retrieves the webresource's canonicalized resource string. +* @param {WebResource} webResource The webresource to get the canonicalized resource string from. +* @return {string} The canonicalized resource string. 
+*/ +SharedKeyTable.prototype._getCanonicalizedResource = function (webResource) { + var path = '/'; + if (webResource.path) { + path = webResource.path; + } + + var canonicalizedResource = '/' + this.storageAccount + path; + + var queryStringValues = webResource.queryString; + if (queryStringValues[QueryStringConstants.COMP]) { + canonicalizedResource += '?comp=' + queryStringValues[QueryStringConstants.COMP]; + } + + return canonicalizedResource; +}; + +module.exports = SharedKeyTable; + +/***/ }), + +/***/ 54916: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var azureCommon = __nccwpck_require__(82187); +var azureutil = azureCommon.util; +var Md5Wrapper = __nccwpck_require__(11007); +var StorageServiceClient = azureCommon.StorageServiceClient; +var WebResource = azureCommon.WebResource; +var Constants = azureCommon.Constants; +var HeaderConstants = Constants.HeaderConstants; +var TableConstants = Constants.TableConstants; + +var RequestHandler = __nccwpck_require__(2600); +var entityResult = __nccwpck_require__(255); + +/** +* Creates a new BatchResult. +* +* @param {TableService} tableService The table service. +* @param {string} table The table name. +* @param {array} operations The array of batch operations. 
+* @constructor +* @ignore +*/ +function BatchResult(tableService, table, operations) { + this.tableService = tableService; + this.table = table; + this.operations = operations; + this.batchBoundary = 'batch_' + BatchResult._getBoundary(); + this.changesetBoundary = 'changeset_' + BatchResult._getBoundary(); +} + +/** +* Gets a boundary string. +* +* @return {string} The boundary string. +* @ignore +*/ +BatchResult._getBoundary = function () { + return (new Md5Wrapper().createMd5Hash()).update('' + (new Date()).getTime()).digest('hex'); +}; + +/** +* Constructs the batch web request. +* +* @return {WebResource} The batch WebResource. +* @ignore +*/ +BatchResult.prototype.constructWebResource = function () { + var webResource = WebResource.post('$batch') + .withRawResponse(true); + + webResource.withHeader(HeaderConstants.CONTENT_TYPE, 'multipart/mixed; charset="utf-8"; boundary=' + this.batchBoundary); + webResource.withHeader(HeaderConstants.DATA_SERVICE_VERSION, '3.0;'); + webResource.withHeader(HeaderConstants.MAX_DATA_SERVICE_VERSION, '3.0;NetFx'); + + return webResource; +}; + +/** +* Serializes the batch web body. +* +* @return {string} The serialized batch content. 
+* @ignore +*/ +BatchResult.prototype.serialize = function () { + var body = '--' + this.batchBoundary + '\n'; + + if (this.operations.length === 1 && this.operations[0].type === TableConstants.Operations.RETRIEVE) { + body += HeaderConstants.CONTENT_TYPE + ': application/http\n'; + body += HeaderConstants.CONTENT_TRANSFER_ENCODING + ': binary\n\n'; + body += this._serializeOperation(this.operations[0]); + } else { + body += HeaderConstants.CONTENT_TYPE + ': multipart/mixed;charset="utf-8";boundary=' + this.changesetBoundary + '\n\n'; + + for (var i = 0; i < this.operations.length; i++) { + body += '--' + this.changesetBoundary + '\n'; + body += HeaderConstants.CONTENT_TYPE + ': application/http\n'; + body += HeaderConstants.CONTENT_TRANSFER_ENCODING + ': binary\n\n'; + body += this._serializeOperation(this.operations[i], i) + '\n'; + } + body += '--' + this.changesetBoundary + '--\n'; + } + body += '--' + this.batchBoundary + '--'; + + return body; +}; + +/** +* Serializes a request within the batch. +* +* @param {object} The operation to serialize. +* @param {number} The index of the operation in the operations arrray. +* @return {string} The serialized operation content. 
+* @ignore +*/ +BatchResult.prototype._serializeOperation = function (operation, count) { + operation.options.payloadFormat = operation.options.payloadFormat || this.tableService.defaultPayloadFormat; + var webResource = RequestHandler.constructEntityWebResource(operation.type, this.table, operation.entity, operation.options); + + if (count) { + webResource.headers[HeaderConstants.CONTENT_ID] = count; + } + + var contentType = webResource.headers[HeaderConstants.CONTENT_TYPE]; + if (contentType) { + if (!azureutil.stringEndsWith(contentType, ';')) { + webResource.headers[HeaderConstants.CONTENT_TYPE] += ';'; + } + webResource.headers[HeaderConstants.CONTENT_TYPE] += 'type=entry'; + } + + this.tableService._setRequestUrl(webResource); + + var content = webResource.method + ' ' + webResource.uri + ' HTTP/1.1\n'; + + Object.keys(webResource.headers).forEach(function (header) { + content += header + ': ' + webResource.headers[header] + '\n'; + }); + + content += '\n'; + content += webResource.body || ''; + + return content; +}; + +/** +* Parses a batch response. +* +* @param {object} responseObject The response object for the batch request. +* @return {array} An array with the processed / parsed responses. 
+*/ +BatchResult.prototype.parse = function (responseObject) { + var responses = null; + if (responseObject && responseObject.response && responseObject.response.body && + typeof responseObject.response.body === 'string') { + responses = []; + var rawResponses = responseObject.response.body.split(TableConstants.CHANGESET_DELIMITER); + + if(rawResponses.length === 1) { + rawResponses = responseObject.response.body.split(TableConstants.BATCH_DELIMITER); + } + + var self = this; + rawResponses.forEach(function (rawResponse) { + // Find HTTP/1.1 CODE line + var httpLocation = rawResponse.indexOf('HTTP/1.1'); + if (httpLocation !== -1) { + rawResponse = rawResponse.substring(httpLocation); + + // valid response + var response = self._parseOperation(rawResponse); + responses.push(response); + } + }); + } + + return responses; +}; + +/** +* Parses a partial response. +* +* @param {string} rawResponse The raw, unparsed, http response from the server for the batch response. +* @return {object} A response object. 
+*/ +BatchResult.prototype._parseOperation = function (rawResponse) { + var responseObject = { + error: null, + response: { } + }; + + // Split into multiple lines and process them + var responseLines = rawResponse.split('\r\n'); + + if (responseLines.length > 0) { + // Retrieve response code + var headers = responseLines.shift().split(' '); + if (headers.length >= 2) { + responseObject.response.statusCode = parseInt(headers[1]); + responseObject.response.isSuccessful = WebResource.validResponse(responseObject.response.statusCode); + } + + // Populate headers + responseObject.response.headers = { }; + responseObject.response.body = ''; + + var isBody = false; + responseLines.forEach(function (line) { + if (line === '' && !isBody) { + isBody = true; + } else if (isBody) { + responseObject.response.body += line; + } else { + var headerSplit = line.indexOf(':'); + if (headerSplit !== -1) { + responseObject.response.headers[line.substring(0, headerSplit).trim().toLowerCase()] = line.substring(headerSplit + 1).trim(); + } + } + }); + + StorageServiceClient._parseResponse(responseObject.response, this.tableService.xml2jsSettings); + if (!responseObject.response.isSuccessful) { + responseObject.error = StorageServiceClient._normalizeError(responseObject.response.body, responseObject.response); + } + + if (!responseObject.error) { + var index = responseObject.response.headers[HeaderConstants.CONTENT_ID] || 0; + var propertyResolver; + var entityResolver; + if (index && this.operations[index]) { + var options = this.operations[index].options; + propertyResolver = options.propertyResolver; + entityResolver = options.entityResolver; + } + responseObject.entity = entityResult.parseEntity(responseObject.response, propertyResolver, entityResolver); + } + } + + return responseObject; +}; + +module.exports = BatchResult; + +/***/ }), + +/***/ 255: +/***/ ((module, exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var azureCommon = __nccwpck_require__(82187); +var Constants = azureCommon.Constants; +var TableConstants = Constants.TableConstants; +var HeaderConstants = Constants.HeaderConstants; +var odataHandler = __nccwpck_require__(8678); + +exports = module.exports; + +exports.serialize = function (entity) { + return odataHandler.serializeJson(entity); +}; + +exports.parseQuery = function (response, autoResolveProperties, propertyResolver, entityResolver) { + var result = {}; + if (response.body) { + result = odataHandler.parseJsonEntities(response.body, autoResolveProperties, propertyResolver, entityResolver); + } + + return result; +}; + +exports.parseEntity = function (response, autoResolveProperties, propertyResolver, entityResolver) { + var result = {}; + if (response.body) { + result = odataHandler.parseJsonSingleEntity(response.body, autoResolveProperties, propertyResolver, entityResolver); + } + + if (response.headers && response.headers[HeaderConstants.ETAG.toLowerCase()]) { + if (!result[TableConstants.ODATA_METADATA_MARKER]) { + result[TableConstants.ODATA_METADATA_MARKER] = {}; + } + + result[TableConstants.ODATA_METADATA_MARKER].etag = response.headers[HeaderConstants.ETAG.toLowerCase()]; + } + + return result; +}; + +exports.getEtag = function (entity) { + var etag; + if (entity && entity[TableConstants.ODATA_METADATA_MARKER]) { + etag = 
entity[TableConstants.ODATA_METADATA_MARKER].etag; + } + return etag; +}; + +/***/ }), + +/***/ 77843: +/***/ ((module, exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var odataHandler = __nccwpck_require__(8678); + +function TableResult(name) { + this.name = name; +} + +TableResult.serialize = function (tableName) { + return JSON.stringify({ TableName: tableName }); +}; + +TableResult.parse = function (response) { + var result = null; + if (response.body) { + result = odataHandler.parseJsonTables(response.body); + } + + return result; +}; + +exports = module.exports = TableResult; + +/***/ }), + +/***/ 72394: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// Module dependencies. +var extend = __nccwpck_require__(99237); + +var azureCommon = __nccwpck_require__(82187); +var SR = azureCommon.SR; +var validate = azureCommon.validate; +var Constants = azureCommon.Constants; +var TableConstants = Constants.TableConstants; + +/** +* Creates a new TableBatch. +* +* @constructor +*/ +function TableBatch() { + this.operations = []; + this.pk = null; + this.retrieve = false; +} + +/** +* Removes all of the operations from the batch. +*/ +TableBatch.prototype.clear = function () { + this.operations = []; +}; + +/** +* Returns a boolean value indicating weather there are operations in the batch. +* +* @return {Boolean} True if there are operations queued up; false otherwise. +*/ +TableBatch.prototype.hasOperations = function () { + return this.operations.length > 0; +}; + +/** +* Returns the number of operations in the batch. +* +* @return {number} The number of operations in the batch. +*/ +TableBatch.prototype.size = function () { + return this.operations.length; +}; + +/** +* Adds a retrieve operation to the batch. Note that this must be the only operation in the batch. +* +* @param {string} partitionKey The partition key. +* @param {string} rowKey The row key. +* @param {object} [options] The request options. +* @param {string} [options.payloadFormat] The payload format to use for the request. +* @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. Given the partition key, row key, property name, property value, +* and the property Edm type if given by the service, returns the Edm type of the property. +* @param {Function(entity)} [options.entityResolver] The entity resolver. Given the single entity returned by the query, returns a modified object. 
+*/ +TableBatch.prototype.retrieveEntity = function (partitionKey, rowKey, options) { + var entity = { PartitionKey: {_: partitionKey, $: 'Edm.String'}, + RowKey: {_: rowKey, $: 'Edm.String'}, + }; + this.addOperation(TableConstants.Operations.RETRIEVE, entity, options); +}; + +/** +* Adds an insert operation to the batch. +* +* @param {object} entity The entity. +* @param {object} [options] The request options. +* @param {string} [options.echoContent] Whether or not to return the entity upon a successful insert. Inserts only, default to false. +* @param {string} [options.payloadFormat] The payload format to use for the request. +* @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. Only applied if echoContent is true. Given the partition key, row key, property name, +* property value, and the property Edm type if given by the service, returns the Edm type of the property. +* @param {Function(entity)} [options.entityResolver] The entity resolver. Only applied if echoContent is true. Given the single entity returned by the insert, returns +* a modified object. +*/ +TableBatch.prototype.insertEntity = function (entity, options) { + this.addOperation(TableConstants.Operations.INSERT, entity, options); +}; + +/** +* Adds a delete operation to the batch. +* +* @param {object} entity The entity. +*/ +TableBatch.prototype.deleteEntity = function (entity) { + this.addOperation(TableConstants.Operations.DELETE, entity); +}; + +/** +* Adds a merge operation to the batch. +* +* @param {object} entity The entity. +*/ +TableBatch.prototype.mergeEntity = function (entity) { + this.addOperation(TableConstants.Operations.MERGE, entity); +}; + +/** +* Adds an replace operation to the batch. +* +* @param {object} entity The entity. +*/ +TableBatch.prototype.replaceEntity = function (entity) { + this.addOperation(TableConstants.Operations.REPLACE, entity); +}; + +/** +* Adds an insert or replace operation to the batch. 
+* +* @param {object} entity The entity. +*/ +TableBatch.prototype.insertOrReplaceEntity = function (entity) { + this.addOperation(TableConstants.Operations.INSERT_OR_REPLACE, entity); +}; + +/** +* Adds an insert or merge operation to the batch. +* +* @param {object} entity The entity. +*/ +TableBatch.prototype.insertOrMergeEntity = function (entity) { + this.addOperation(TableConstants.Operations.INSERT_OR_MERGE, entity); +}; + +/** +* Adds an operation to the batch after performing checks. +* +* @param {string} operationType The type of operation to perform. See Constants.TableConstants.Operations +* @param {object} entity The entity. +* @param {object} [options] The request options. +*/ +TableBatch.prototype.addOperation = function (operationType, entity, options) { + validate.validateArgs('addOperation', function (v) { + v.object(entity, 'entity'); + v.object(entity.PartitionKey, 'entity.PartitionKey'); + v.object(entity.RowKey, 'entity.RowKey'); + v.stringAllowEmpty(entity.PartitionKey._, 'entity.PartitionKey._'); + v.stringAllowEmpty(entity.RowKey._, 'entity.RowKey._'); + }); + + if(this.operations.length >= 100) { + throw new Error(SR.BATCH_TOO_LARGE); + } + + if (operationType === TableConstants.Operations.RETRIEVE) { + if(this.hasOperations()) { + throw new Error(SR.BATCH_ONE_RETRIEVE); + } else { + this.retrieve = true; + } + } else if (this.retrieve) { + throw new Error(SR.BATCH_ONE_RETRIEVE); + } + + if (!this.hasOperations()) { + this.pk = entity.PartitionKey._; + } else if (entity.PartitionKey._ !== this.pk) { + throw new Error(SR.BATCH_ONE_PARTITION_KEY); + } + + var copiedOptions = extend(true, {}, options); + this.operations.push({type: operationType, entity: entity, options: copiedOptions}); +}; + +/** +* Gets an operation from the batch. Returns null if the index does not exist. +* +* @param {number} index The index in the operations array at which to remove an element. +* @return {object} The removed operation. 
+*/ +TableBatch.prototype.getOperation = function (index) { + return this.operations[index]; +}; + +/** +* Removes an operation from the batch. Returns null if the index does not exist. +* +* @param {number} index The index in the operations array at which to remove an element. +* @return {object} The removed operation. +*/ +TableBatch.prototype.removeOperation = function (index) { + var operation = this.operations.splice(index, 1)[0]; + + // if the array is empty, unlock the partition key + if (!this.hasOperations()) { + this.pk = null; + this.retrieve = false; + } + + return operation; +}; + +module.exports = TableBatch; + + +/***/ }), + +/***/ 13054: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. +var _ = __nccwpck_require__(7404); +var util = __nccwpck_require__(73837); + +var azureCommon = __nccwpck_require__(82187); +var azureutil = azureCommon.util; +var SR = azureCommon.SR; +var QueryStringConstants = azureCommon.Constants.QueryStringConstants; + +var edmHandler = __nccwpck_require__(63346); +var TableUtilities = __nccwpck_require__(581); +var QueryComparisons = TableUtilities.QueryComparisons; +var TableOperators = TableUtilities.TableOperators; +var EdmType = TableUtilities.EdmType; + +/** + * Creates a new TableQuery object. 
+ * + * @constructor + */ +function TableQuery() { + this._fields = []; + this._where = []; + this._top = null; +} + +/** +* Specifies the select clause. If no arguments are given, all fields will be selected. +* +* @param {array} fields The fields to be selected. +* @return {TableQuery} A table query object with the select clause. +* @example +* var tableQuery = new TableQuery().select('field1', 'field2'); +*/ +TableQuery.prototype.select = function () { + var self = this; + if (arguments) { + _.each(arguments, function (argument) { + self._fields.push(argument); + }); + } + + return this; +}; + +/** + * Specifies the top clause. + * + * @param {int} top The number of items to fetch. + * @return {TableQuery} A table query object with the top clause. + * @example + * var tableQuery = new TableQuery().top(10); + * + * // tasktable should already exist and have entities + * tableService.queryEntities('tasktable', tableQuery, null \/*currentToken*\/, function(error, result) { + * if(!error) { + * var entities = result.entities; // there will be 10 or less entities + * // do stuff with the returned entities if there are any + * // if result.continuationToken exists, to get the next 10 (or less) entities + * // call queryEntities as above, but with the returned token instead of null + * } + * }); + */ +TableQuery.prototype.top = function (top) { + this._top = top; + return this; +}; + +/** + * Specifies the where clause. + * + * Valid type specifier strings include: ?string?, ?bool?, ?int32?, ?double?, ?date?, ?guid?, ?int64?, ?binary? + * A type must be specified for guid, int64, and binaries or the filter produced will be incorrect. + * + * @param {string} condition The condition string. + * @param {string|array} value Value(s) to insert in question mark (?) parameters. + * @return {TableQuery} A table query object with the where clause. 
+ * @example + * var tableQuery = new TableQuery().where(TableQuery.guidFilter('GuidField', QueryComparisons.EQUAL, guidVal)); + * OR + * var tableQuery = new TableQuery().where('Name == ? or Name <= ?', name1, name2); + * OR + * var tableQuery = new TableQuery().where('Name == ?string? && Value == ?int64?', name1, int64Val); + * + * // tasktable should already exist and have entities + * tableService.queryEntities('tasktable', tableQuery, null \/*currentToken*\/, function(error, result, response) { + * if(!error) { + * var entities = result.entities; + * // do stuff with the returned entities if there are any + * } + * }); + */ +TableQuery.prototype.where = function (condition) { + this._where.push(TableQuery._encodeConditionString(condition, arguments)); + return this; +}; + +/** + * Generates a property filter condition string for an 'int' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|int} value An 'int' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.int32Filter('IntField', QueryComparisons.EQUAL, 5); + */ +TableQuery.int32Filter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.INT32); +}; + +/** + * Generates a property filter condition string for a 'int64' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|int64} value An 'int64' containing the value to compare with the property. 
+ * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.int64Filter('Int64Field', QueryComparisons.EQUAL, 123); + */ +TableQuery.int64Filter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.INT64); +}; + +/** + * Generates a property filter condition string for a 'double' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|double}value A 'double' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.doubleFilter('DoubleField', QueryComparisons.EQUAL, 123.45); + */ +TableQuery.doubleFilter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.DOUBLE); +}; + +/** + * Generates a property filter condition string for a 'boolean' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|boolean} value A 'boolean' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.booleanFilter('BooleanField', QueryComparisons.EQUAL, false); + */ +TableQuery.booleanFilter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.BOOLEAN); +}; + +/** + * Generates a property filter condition string for a 'datetime' value. 
+ * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|date} value A 'datetime' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.dateFilter('DateTimeField', QueryComparisons.EQUAL, new Date(Date.UTC(2001, 1, 3, 4, 5, 6))); + */ +TableQuery.dateFilter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.DATETIME); +}; + +/** + * Generates a property filter condition string for a 'guid' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|guid} value A 'guid' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.guidFilter('GuidField', QueryComparisons.EQUAL, guid.v1()); + */ +TableQuery.guidFilter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.GUID); +}; + +/** + * Generates a property filter condition string for a 'binary' value. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string|buffer}value A 'buffer' containing the value to compare with the property. 
+ * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.binaryFilter('BinaryField', QueryComparisons.EQUAL, Buffer.from('hello')); + */ +TableQuery.binaryFilter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.BINARY); +}; + +/** + * Generates a property filter condition string. + * + * @param {string} propertyName A string containing the name of the property to compare. + * @param {string} operation A string containing the comparison operator to use. + * See Constants.TableConstants.QueryComparisons for a list of allowed operations. + * @param {string} value A 'string' containing the value to compare with the property. + * @return {string} A string containing the formatted filter condition. + * @example + * var query = TableQuery.stringFilter('StringField', QueryComparisons.EQUAL, 'name'); + */ +TableQuery.stringFilter = function (propertyName, operation, value) { + return TableQuery._concatFilterString(propertyName, operation, value, EdmType.STRING); +}; + +/** + * Creates a filter condition using the specified logical operator on two filter conditions. + * + * @param {string} filterA A string containing the first formatted filter condition. + * @param {string} operatorString A string containing the operator to use (AND, OR). + * @param {string} filterB A string containing the second formatted filter condition. + * @return {string} A string containing the combined filter expression. 
+ * @example + * var filter1 = TableQuery.stringFilter('Name', QueryComparisons.EQUAL, 'Person'); + * var filter2 = TableQuery.booleanFilter('Visible', QueryComparisons.EQUAL, true); + * var combinedFilter = TableQuery.combineFilters(filter1, TableUtilities.TableOperators.AND, filter2); + */ +TableQuery.combineFilters = function (filterA, operatorString, filterB) { + return filterA + ' ' + operatorString + ' ' + filterB; +}; + +/** + * Specifies an AND where condition. + * + * @param {string} condition The condition string. + * @param {array} arguments Any number of arguments to be replaced in the condition by the question mark (?). + * @return {TableQuery} A table query object with the and clause. + * @example + * var tableQuery = new TableQuery() + * .where('Name == ? or Name <= ?', 'Person1', 'Person2'); + * .and('Age >= ?', 18); + */ +TableQuery.prototype.and = function (condition) { + if (this._where.length === 0) { + throw new Error(util.format(SR.QUERY_OPERATOR_REQUIRES_WHERE, 'AND')); + } + + this._where.push(' and ' + TableQuery._encodeConditionString(condition, arguments)); + return this; +}; + +/** + * Specifies an OR where condition. + * + * @param {string} condition The condition. + * @param {array} arguments Any number of arguments to be replaced in the condition by the question mark (?). + * @return {TableQuery} A table query object with the or clause. + * @example + * var tableQuery = new TableQuery() + * .where('Name == ? or Name <= ?', 'Person1', 'Person2'); + * .or('Age >= ?', 18); + */ +TableQuery.prototype.or = function (condition) { + if (this._where.length === 0) { + throw new Error(util.format(SR.QUERY_OPERATOR_REQUIRES_WHERE, 'OR')); + } + + this._where.push(' or ' + TableQuery._encodeConditionString(condition, arguments)); + return this; +}; + +/** + * Returns the query string object for the query. + * + * @return {object} JSON object representing the query string arguments for the query. 
+ */ +TableQuery.prototype.toQueryObject = function () { + var query = {}; + if (this._fields.length > 0) { + query[QueryStringConstants.SELECT] = this._fields.join(','); + } + + if (this._where.length > 0) { + query[QueryStringConstants.FILTER] = this._where.join(''); + } + + if (this._top) { + query[QueryStringConstants.TOP] = this._top; + } + + return query; +}; + +// Functions + +/** +* Concat the filter string parameters. +* +* @param {string} propertyName A string containing the name of the property to compare. +* @param {string} operation A string containing the comparison operator to use. +* See Constants.TableConstants.QueryComparisons for a list of allowed operations. +* @param {object} value The value to compare with the property. +* @param {string} type A string EdmType of the property to compare. +* @return {string} A string containing the formatted filter condition. +* @ignore +*/ +TableQuery._concatFilterString = function (propertyName, operation, value, type) { + if (azureutil.objectIsNull(propertyName)) { + throw new Error(util.format(SR.ARGUMENT_NULL_OR_UNDEFINED, 'propertyName')); + } + + if (azureutil.objectIsNull(operation)) { + throw new Error(util.format(SR.ARGUMENT_NULL_OR_UNDEFINED, 'operation')); + } + + if (azureutil.objectIsNull(value)) { + throw new Error(util.format(SR.ARGUMENT_NULL_OR_UNDEFINED, 'value')); + } + + var serializedValue = edmHandler.serializeQueryValue(value, type); + return propertyName + ' ' + operation + ' ' + serializedValue; +}; + +/** + * Encodes a condition string. + * + * @param {string} condition The condition. + * @param {array} arguments Any number of arguments to be replaced in the condition by the question mark (?). 
+ * @return {TableQuery} A table query object with the or clause + * @ignore + */ +TableQuery._encodeConditionString = function (condition, args) { + var encodedCondition = TableQuery._replaceOperators(condition); + if (args.length > 1) { + var sections = encodedCondition.split(/(\?string\?|\?int32\?|\?int64\?|\?bool\?|\?double\?|\?date\?|\?binary\?|\?guid\?|\?)/); + var count = 1; + for (var i = 0; i < sections.length && count < args.length; i++) { + if (sections[i].indexOf('?') === 0) { + var type = TableQuery._getEdmType(sections[i]); + sections[i] = edmHandler.serializeQueryValue(args[count], type); + count++; + } + } + encodedCondition = sections.join(''); + } + + return encodedCondition; +}; + +/** + * Converts the query string type to an Edm type. + * + * @param {string} type The type included in the query string. + * @return {string} The EdmType. + * @ignore + */ +TableQuery._getEdmType = function (type) { + switch (type) { + case '?binary?': + return EdmType.BINARY; + case '?int64?': + return EdmType.INT64; + case '?date?': + return EdmType.DATETIME; + case '?guid?': + return EdmType.GUID; + case '?int32?': + return EdmType.INT32; + case '?double?': + return EdmType.DOUBLE; + case '?bool?': + return EdmType.BOOLEAN; + case '?string?': + return EdmType.STRING; + default: + return undefined; + } +}; + +/** + * Replace operators. + * @ignore + * @param {string} whereClause The text where to replace the operators. + * @return {string} The string with the replaced operators. 
+ * @ignore + */ +TableQuery._replaceOperators = function (whereClause) { + var encodedWhereClause = whereClause.replace(/ == /g, ' ' + QueryComparisons.EQUAL + ' '); + encodedWhereClause = encodedWhereClause.replace(/ != /g, ' ' + QueryComparisons.NOT_EQUAL + ' '); + encodedWhereClause = encodedWhereClause.replace(/ >= /g, ' ' + QueryComparisons.GREATER_THAN_OR_EQUAL + ' '); + encodedWhereClause = encodedWhereClause.replace(/ > /g, ' ' + QueryComparisons.GREATER_THAN + ' '); + encodedWhereClause = encodedWhereClause.replace(/ <= /g, ' ' + QueryComparisons.LESS_THAN_OR_EQUAL + ' '); + encodedWhereClause = encodedWhereClause.replace(/ < /g, ' ' + QueryComparisons.LESS_THAN + ' '); + encodedWhereClause = encodedWhereClause.replace(/ \&\& /g, ' ' + TableOperators.AND + ' '); + encodedWhereClause = encodedWhereClause.replace(/ \|\| /g, ' ' + TableOperators.OR + ' '); + encodedWhereClause = encodedWhereClause.replace(/!/g, TableOperators.NOT); + + return encodedWhereClause; +}; + +module.exports = TableQuery; + + +/***/ }), + +/***/ 8920: +/***/ ((module, __unused_webpack_exports, __nccwpck_require__) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Module dependencies. 
+var util = __nccwpck_require__(73837); +var extend = __nccwpck_require__(99237); +var _ = __nccwpck_require__(7404); + +var azureCommon = __nccwpck_require__(82187); +var azureutil = azureCommon.util; +var validate = azureCommon.validate; +var SR = azureCommon.SR; +var StorageServiceClient = azureCommon.StorageServiceClient; +var SharedKeyTable = __nccwpck_require__(92260); +var RequestHandler = __nccwpck_require__(2600); +var TableQuery = __nccwpck_require__(13054); +var WebResource = azureCommon.WebResource; +var Constants = azureCommon.Constants; +var QueryStringConstants = Constants.QueryStringConstants; +var HeaderConstants = Constants.HeaderConstants; +var TableConstants = Constants.TableConstants; +var RequestLocationMode = Constants.RequestLocationMode; + +// Models requires +var TableResult = __nccwpck_require__(77843); +var entityResult = __nccwpck_require__(255); +var BatchResult = __nccwpck_require__(54916); +var ServiceStatsParser = azureCommon.ServiceStatsParser; +var AclResult = azureCommon.AclResult; +var TableUtilities = __nccwpck_require__(581); + +/** +* Creates a new TableService object. +* If no connection string or storageaccount and storageaccesskey are provided, +* the AZURE_STORAGE_CONNECTION_STRING or AZURE_STORAGE_ACCOUNT and AZURE_STORAGE_ACCESS_KEY environment variables will be used. +* @class +* The TableService object allows you to peform management operations with the Microsoft Azure Table Service. +* The Table Service stores data in rows of key-value pairs. A table is composed of multiple rows, and each row +* contains key-value pairs. There is no schema, so each row in a table may store a different set of keys. +* +* For more information on the Table Service, as well as task focused information on using it from a Node.js application, see +* [How to Use the Table Service from Node.js](http://azure.microsoft.com/en-us/documentation/articles/storage-nodejs-how-to-use-table-storage/). 
+* The following defaults can be set on the Table service. +* defaultTimeoutIntervalInMs The default timeout interval, in milliseconds, to use for request made via the Table service. +* defaultClientRequestTimeoutInMs The default timeout of client requests, in milliseconds, to use for the request made via the Table service. +* defaultMaximumExecutionTimeInMs The default maximum execution time across all potential retries, for requests made via the Table service. +* defaultLocationMode The default location mode for requests made via the Table service. +* defaultPayloadFormat The default payload format for requests made via the Table service. +* useNagleAlgorithm Determines whether the Nagle algorithm is used for requests made via the Table service.; true to use the +* Nagle algorithm; otherwise, false. The default value is false. +* enableGlobalHttpAgent Determines whether global HTTP(s) agent is enabled; true to use Global HTTP(s) agent; otherwise, false to use +* http(s).Agent({keepAlive:true}). +* @constructor +* @extends {StorageServiceClient} +* +* @param {string} [storageAccountOrConnectionString] The storage account or the connection string. +* @param {string} [storageAccessKey] The storage access key. +* @param {string|object} [host] The host address. To define primary only, pass a string. +* Otherwise 'host.primaryHost' defines the primary host and 'host.secondaryHost' defines the secondary host. +* @param {string} [sasToken] The Shared Access Signature token. +* @param {string} [endpointSuffix] The endpoint suffix. 
+*/ +function TableService(storageAccountOrConnectionString, storageAccessKey, host, sasToken, endpointSuffix) { + var storageServiceSettings = StorageServiceClient.getStorageSettings(storageAccountOrConnectionString, storageAccessKey, host, sasToken, endpointSuffix); + + TableService['super_'].call(this, + storageServiceSettings._name, + storageServiceSettings._key, + storageServiceSettings._tableEndpoint, + storageServiceSettings._usePathStyleUri, + storageServiceSettings._sasToken); + + if (this.anonymous) { + throw new Error(SR.ANONYMOUS_ACCESS_BLOBSERVICE_ONLY); + } + + if(this.storageAccount && this.storageAccessKey) { + this.storageCredentials = new SharedKeyTable(this.storageAccount, this.storageAccessKey, this.usePathStyleUri); + } + + this.defaultPayloadFormat = TableUtilities.PayloadFormat.MINIMAL_METADATA; +} + +util.inherits(TableService, StorageServiceClient); + +// Table service methods + +/** +* Gets the service stats for a storage account’s Table service. +* +* @this {TableService} +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link ServiceStats}` will contain the stats. +* `response` will contain information related to this operation. +*/ +TableService.prototype.getServiceStats = function (optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getServiceStats', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var webResource = WebResource.get() + .withQueryOption(QueryStringConstants.COMP, 'stats') + .withQueryOption(QueryStringConstants.RESTYPE, 'service'); + + var processResponseCallback = function (responseObject, next) { + responseObject.serviceStatsResult = null; + if (!responseObject.error) { + responseObject.serviceStatsResult = ServiceStatsParser.parse(responseObject.response.body.StorageServiceStats); + } + + // function to be called after all filters + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.serviceStatsResult, returnObject.response); + }; + + // call the first filter + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Gets the properties of a storage account’s Table service, including Azure Storage Analytics. +* +* @this {TableService} +* @param {object} [options] The request options. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `[result]{@link ServiceProperties}` will contain the properties. +* `response` will contain information related to this operation. +*/ +TableService.prototype.getServiceProperties = function (optionsOrCallback, callback) { + return this.getAccountServiceProperties(optionsOrCallback, callback); +}; + +/** +* Sets the properties of a storage account’s Table service, including Azure Storage Analytics. +* You can also use this operation to set the default request version for all incoming requests that do not have a version specified. +* +* @this {TableService} +* @param {object} serviceProperties The service properties. +* @param {object} [options] The request options. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information if an error occurs; +* `response` will contain information related to this operation. +*/ +TableService.prototype.setServiceProperties = function (serviceProperties, optionsOrCallback, callback) { + return this.setAccountServiceProperties(serviceProperties, optionsOrCallback, callback); +}; + +/** +* Lists a segment containing a collection of table items under the specified account. +* +* @this {TableService} +* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The create options or callback function. +* @param {int} [options.maxResults] Specifies the maximum number of tables to return per call to Azure ServiceClient. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {string} [options.payloadFormat] The payload format to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of tables and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. +*/ +TableService.prototype.listTablesSegmented = function (currentToken, optionsOrCallback, callback) { + this.listTablesSegmentedWithPrefix(null /* prefix */, currentToken, optionsOrCallback, callback); +}; + +/** +* Lists a segment containing a collection of table items under the specified account. +* +* @this {TableService} +* @param {string} prefix The prefix of the table name. 
+* @param {object} currentToken A continuation token returned by a previous listing operation. Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The create options or callback function. +* @param {int} [options.maxResults] Specifies the maximum number of tables to return per call to Azure ServiceClient. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {string} [options.payloadFormat] The payload format to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain `entries` and `continuationToken`. +* `entries` gives a list of tables and the `continuationToken` is used for the next listing operation. +* `response` will contain information related to this operation. 
+*/ +TableService.prototype.listTablesSegmentedWithPrefix = function (prefix, currentToken, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('listTables', function (v) { + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.payloadFormat = options.payloadFormat || this.defaultPayloadFormat; + + var webResource = WebResource.get(TableConstants.TABLE_SERVICE_TABLE_NAME); + RequestHandler.setTableRequestHeadersAndBody(webResource, null, options.payloadFormat); + + if(!azureutil.objectIsNull(currentToken)) { + webResource.withQueryOption(TableConstants.NEXT_TABLE_NAME, currentToken.nextTableName); + } + + if(!azureutil.objectIsNull(prefix)) { + var query = new TableQuery() + .where(TableConstants.TABLE_NAME + ' >= ?', prefix) + .and(TableConstants.TABLE_NAME + ' < ?', prefix + '{'); + + webResource.withQueryOption(QueryStringConstants.FILTER, query.toQueryObject().$filter); + } + + if(!azureutil.objectIsNull(options.maxResults)) { + var query = new TableQuery().top(options.maxResults); + webResource.withQueryOption(QueryStringConstants.TOP, query.toQueryObject().$top); + } + + options.requestLocationMode = azureutil.getNextListingLocationMode(currentToken); + + var processResponseCallback = function (responseObject, next) { + responseObject.listTablesResult = null; + + if (!responseObject.error) { + responseObject.listTablesResult = { + entries: null, + continuationToken: null + }; + responseObject.listTablesResult.entries = TableResult.parse(responseObject.response); + + if (responseObject.response.headers[TableConstants.CONTINUATION_NEXT_TABLE_NAME] && + !azureutil.objectIsEmpty(responseObject.response.headers[TableConstants.CONTINUATION_NEXT_TABLE_NAME])) { + responseObject.listTablesResult.continuationToken = { + nextTableName: null, + targetLocation: null + }; + + 
responseObject.listTablesResult.continuationToken.nextTableName = responseObject.response.headers[TableConstants.CONTINUATION_NEXT_TABLE_NAME]; + responseObject.listTablesResult.continuationToken.targetLocation = responseObject.targetLocation; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.listTablesResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +// Table Methods + +/** +* Gets the table's ACL. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the ACL information for the table. 
See `[AccessPolicy]{@link AccessPolicy}` for detailed information. +* `response` will contain information related to this operation. +*/ +TableService.prototype.getTableAcl = function (table, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('getTableAcl', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.requestLocationMode = Constants.RequestLocationMode.PRIMARY_OR_SECONDARY; + + var webResource = WebResource.get(table) + .withQueryOption(QueryStringConstants.COMP, 'acl'); + + var processResponseCallback = function (responseObject, next) { + responseObject.tableResult = null; + if (!responseObject.error) { + responseObject.tableResult = new TableResult(table); + responseObject.tableResult.signedIdentifiers = AclResult.parse(responseObject.response.body); + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.tableResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Updates the table's ACL. +* +* @this {TableService} +* @param {string} table The table name. +* @param {Object.} signedIdentifiers The table ACL settings. See `[AccessPolicy]{@link AccessPolicy}` for detailed information. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain information for the table. +* `response` will contain information related to this operation. +*/ +TableService.prototype.setTableAcl = function (table, signedIdentifiers, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('setTableAcl', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var policies = null; + if (signedIdentifiers) { + if(_.isArray(signedIdentifiers)) { + throw new TypeError(SR.INVALID_SIGNED_IDENTIFIERS); + } + policies = AclResult.serialize(signedIdentifiers); + } + + var webResource = WebResource.put(table) + .withQueryOption(QueryStringConstants.COMP, 'acl') + .withHeader(HeaderConstants.CONTENT_LENGTH, !azureutil.objectIsNull(policies) ? 
Buffer.byteLength(policies) : 0) + .withBody(policies); + + var processResponseCallback = function (responseObject, next) { + responseObject.tableResult = null; + if (!responseObject.error) { + + // SetTableAcl doesn't actually return anything in the response + responseObject.tableResult = new TableResult(table); + if (signedIdentifiers) { + responseObject.tableResult.signedIdentifiers = signedIdentifiers; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.tableResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, webResource.body, options, processResponseCallback); +}; + +/** +* Retrieves a shared access signature token. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} sharedAccessPolicy The shared access policy. +* @param {string} [sharedAccessPolicy.Id] The signed identifier. +* @param {object} [sharedAccessPolicy.AccessPolicy.Permissions] The permission type. +* @param {date|string} [sharedAccessPolicy.AccessPolicy.Start] The time at which the Shared Access Signature becomes valid (The UTC value will be used). +* @param {date|string} [sharedAccessPolicy.AccessPolicy.Expiry] The time at which the Shared Access Signature becomes expired (The UTC value will be used). +* @param {string} [sharedAccessPolicy.AccessPolicy.IPAddressOrRange] An IP address or a range of IP addresses from which to accept requests. When specifying a range, note that the range is inclusive. +* @param {string} [sharedAccessPolicy.AccessPolicy.Protocols] The protocols permitted for a request made with the account SAS. +* Possible values are both HTTPS and HTTP (https,http) or HTTPS only (https). The default value is https,http. +* @param {string} [sharedAccessPolicy.AccessPolicy.StartPk] The starting Partition Key for which the SAS will be valid. 
+* @param {string} [sharedAccessPolicy.AccessPolicy.EndPk] The ending Partition Key for which the SAS will be valid. +* @param {string} [sharedAccessPolicy.AccessPolicy.StartRk] The starting Row Key for which the SAS will be valid. +* @param {string} [sharedAccessPolicy.AccessPolicy.EndRk] The ending Row Key for which the SAS will be valid. +* @return {object} An object with the shared access signature. +*/ +TableService.prototype.generateSharedAccessSignature = function (table, sharedAccessPolicy) { + // check if the TableService is able to generate a shared access signature + if (!this.storageCredentials || !this.storageCredentials.generateSignedQueryString) { + throw new Error(SR.CANNOT_CREATE_SAS_WITHOUT_ACCOUNT_KEY); + } + + validate.validateArgs('generateSharedAccessSignature', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.object(sharedAccessPolicy, 'sharedAccessPolicy'); + }); + + var lowerCasedTableName = table.toLowerCase(); + return this.storageCredentials.generateSignedQueryString(Constants.ServiceType.Table, lowerCasedTableName, sharedAccessPolicy, null, { tableName: lowerCasedTableName }); +}; + +/** +* Checks whether or not a table exists on the service. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. 
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the table information including `exists` boolean member. +* `response` will contain information related to this operation. +*/ +TableService.prototype.doesTableExist = function (table, optionsOrCallback, callback) { + this._doesTableExist(table, false, optionsOrCallback, callback); +}; + +/** +* Creates a new table within a storage account. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. 
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the new table information. +* `response` will contain information related to this operation. +*/ +TableService.prototype.createTable = function (table, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createTable', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var tableDescriptor = TableResult.serialize(table); + + var webResource = WebResource.post('Tables') + .withHeader(HeaderConstants.PREFER, HeaderConstants.PREFER_NO_CONTENT); + + RequestHandler.setTableRequestHeadersAndBody(webResource, tableDescriptor, this.defaultPayloadFormat); + + var processResponseCallback = function (responseObject, next) { + responseObject.tableResponse = {}; + responseObject.tableResponse.isSuccessful = responseObject.error ? false : true; + responseObject.tableResponse.statusCode = responseObject.response === null || responseObject.response === undefined ? 
undefined : responseObject.response.statusCode; + if (!responseObject.error) { + responseObject.tableResponse.TableName = table; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.tableResponse, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, webResource.body, options, processResponseCallback); +}; + +/** +* Creates a new table within a storage account if it does not exists. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* `result` will contain the table information including `created` boolean member +* `response` will contain information related to this operation. 
+* +* @example +* var azure = require('azure-storage'); +* var tableService = azure.createTableService(); +* tableService.createTableIfNotExists('tasktable', function(error) { +* if(!error) { +* // Table created or exists +* } +* }); +*/ +TableService.prototype.createTableIfNotExists = function (table, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('createTableIfNotExists', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var self = this; + self._doesTableExist(table, true, options, function(error, result, response) { + var exists = result.exists; + result.created = false; + delete result.exists; + + if (error) { + callback(error, result, response); + } else if (exists) { + response.isSuccessful = true; + callback(error, result, response); + } else { + self.createTable(table, options, function(createError, createResult, response) { + if (!createError) { + createResult.created = true; + } + else if (createError && createError.statusCode === Constants.HttpConstants.HttpResponseCodes.Conflict && createError.code === Constants.TableErrorCodeStrings.TABLE_ALREADY_EXISTS) { + createError = null; + createResult.created = false; + createResult.isSuccessful = true; + } + callback(createError, createResult, response); + }); + } + }); +}; + +/** +* Deletes a table from a storage account. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. 
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information if an error occurs; +* `response` will contain information related to this operation. +*/ +TableService.prototype.deleteTable = function (table, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteTable', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var webResource = WebResource.del('Tables(\'' + table + '\')'); + RequestHandler.setTableRequestHeadersAndBody(webResource, null, this.defaultPayloadFormat); + + var processResponseCallback = function (responseObject, next) { + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Deletes a table from a storage account, if it exists. +* +* @this {TableService} +* @param {string} table The table name. 
+* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* `result` will be `true` if table was deleted, false otherwise +* `response` will contain information related to this operation. 
+*/ +TableService.prototype.deleteTableIfExists = function (table, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('deleteTableIfExists', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + var self = this; + self._doesTableExist(table, true, options, function(error, result, response) { + if (error) { + callback(error, result.exists, response); + } else if (!result.exists) { + response.isSuccessful = true; + callback(error, false, response); + } else { + self.deleteTable(table, options, function(deleteError, deleteResponse) { + var deleted; + if (!deleteError) { + deleted = true; + } else if (deleteError && deleteError.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound && deleteError.code === Constants.StorageErrorCodeStrings.RESOURCE_NOT_FOUND) { + deleted = false; + deleteError = null; + deleteResponse.isSuccessful = true; + } + + callback(deleteError, deleted, deleteResponse); + }); + } + }); +}; + +// Table Entity Methods + +/** +* Queries data in a table. To retrieve a single entity by partition key and row key, use retrieve entity. +* +* @this {TableService} +* @param {string} table The table name. +* @param {TableQuery} tableQuery The query to perform. Use null, undefined, or new TableQuery() to get all of the entities in the table. +* @param {object} currentToken A continuation token returned by a previous listing operation. +* Please use 'null' or 'undefined' if this is the first operation. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {string} [options.payloadFormat] The payload format to use for the request. +* @param {bool} [options.autoResolveProperties] If true, guess at all property types. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(entity)} [options.entityResolver] The entity resolver. Given a single entity returned by the query, returns a modified object which is added to +* the entities array. +* @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. Given the partition key, row key, property name, property value, +* and the property Edm type if given by the service, returns the Edm type of the property. +* @param {TableService~queryResponse} callback `error` will contain information if an error occurs; +* otherwise `entries` will contain the entities returned by the query. +* If more matching entities exist, and could not be returned, +* `queryResultContinuation` will contain a continuation token that can be used +* to retrieve the next set of results. +* `response` will contain information related to this operation. 
+*
+* The logic for returning entity types can get complicated. Here is the algorithm used:
+* ```
+* var propertyType;
+*
+* if (propertyResolver) { // If the caller provides a propertyResolver in the options, use it
+* propertyType = propertyResolver(partitionKey, rowKey, propertyName, propertyValue, propertyTypeFromService);
+* } else if (propertyTypeFromService) { // If the service provides us a property type, use it. See below for an explanation of when this will and won't occur.
+* propertyType = propertyTypeFromService;
+* } else if (autoResolveProperties) { // If options.autoResolveProperties is set to true
+* if (javascript type is string) { // See below for an explanation of how and why autoResolveProperties works as it does.
+* propertyType = 'Edm.String';
+* } else if (javascript type is boolean) {
+* propertyType = 'Edm.Boolean';
+* }
+* }
+*
+* if (propertyType) {
+* // Set the property type on the property.
+* } else {
+* // Property gets no EdmType.
+* }
+* ```
+* Notes:
+*
+* * The service only provides a type if JsonFullMetadata or JsonMinimalMetadata is used, and if the type is Int64, Guid, Binary, or DateTime.
+* * Explanation of autoResolveProperties:
+* * String gets correctly resolved to 'Edm.String'.
+* * Int64, Guid, Binary, and DateTime all get resolved to 'Edm.String.' This only happens if JsonNoMetadata is used (otherwise the service will provide the propertyType in a prior step).
+* * Boolean gets correctly resolved to 'Edm.Boolean'.
+* * For both Int32 and Double, no type information is returned, even in the case of autoResolveProperties = true. This is due to an
+* inability to distinguish between the two in certain cases.
+* +* @example +* var azure = require('azure-storage'); +* var tableService = azure.createTableService(); +* // tasktable should already exist and have entities +* +* // returns all entities in tasktable, and a continuation token for the next page of results if necessary +* tableService.queryEntities('tasktable', null, null \/*currentToken*\/, function(error, result) { +* if(!error) { +* var entities = result.entries; +* // do stuff with the returned entities if there are any +* } +* }); +* +* // returns field1 and field2 of the entities in tasktable, and a continuation token for the next page of results if necessary +* var tableQuery = new TableQuery().select('field1', 'field2'); +* tableService.queryEntities('tasktable', tableQuery, null \/*currentToken*\/, function(error, result) { +* if(!error) { +* var entities = result.entries; +* // do stuff with the returned entities if there are any +* } +* }); +*/ +TableService.prototype.queryEntities = function (table, tableQuery, currentToken, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('queryEntities', function (v) { + v.string(table, 'table'); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.payloadFormat = options.payloadFormat || this.defaultPayloadFormat; + + var webResource = WebResource.get(table); + RequestHandler.setTableRequestHeadersAndBody(webResource, null, options.payloadFormat); + + if (tableQuery) { + var queryString = tableQuery.toQueryObject(); + Object.keys(queryString).forEach(function (queryStringName) { + webResource.withQueryOption(queryStringName, queryString[queryStringName]); + }); + } + + if(!azureutil.objectIsNull(currentToken)) { + webResource.withQueryOption(TableConstants.NEXT_PARTITION_KEY, currentToken.nextPartitionKey); + webResource.withQueryOption(TableConstants.NEXT_ROW_KEY, currentToken.nextRowKey); + } + 
+ options.requestLocationMode = azureutil.getNextListingLocationMode(currentToken); + + var processResponseCallback = function (responseObject, next) { + responseObject.queryEntitiesResult = null; + if (!responseObject.error) { + responseObject.queryEntitiesResult = { + entries: null, + continuationToken: null + }; + + // entries + responseObject.queryEntitiesResult.entries = entityResult.parseQuery(responseObject.response, options.autoResolveProperties, options.propertyResolver, options.entityResolver); + + // continuation token + var continuationToken = { + nextPartitionKey: responseObject.response.headers[TableConstants.CONTINUATION_NEXT_PARTITION_KEY], + nextRowKey: responseObject.response.headers[TableConstants.CONTINUATION_NEXT_ROW_KEY], + targetLocation: responseObject.targetLocation + }; + + if (!azureutil.IsNullOrEmptyOrUndefinedOrWhiteSpace(continuationToken.nextPartitionKey)) { + responseObject.queryEntitiesResult.continuationToken = continuationToken; + } + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.queryEntitiesResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Retrieves an entity from a table. +* +* @this {TableService} +* @param {string} table The table name. +* @param {string} partitionKey The partition key. +* @param {string} rowKey The row key. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {string} [options.payloadFormat] The payload format to use for the request.
+* @param {bool} [options.autoResolveProperties] If true, guess at all property types.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. Given the partition key, row key, property name, property value,
+* and the property Edm type if given by the service, returns the Edm type of the property.
+* @param {Function(entity)} [options.entityResolver] The entity resolver. Given the single entity returned by the query, returns a modified object.
+* @param {errorOrResult} callback `error` will contain information if an error occurs;
+* otherwise `result` will be the matching entity.
+* `response` will contain information related to this operation.
+*
+* The logic for returning entity types can get complicated. Here is the algorithm used:
+* ```
+* var propertyType;
+*
+* if (propertyResolver) { // If the caller provides a propertyResolver in the options, use it
+* propertyType = propertyResolver(partitionKey, rowKey, propertyName, propertyValue, propertyTypeFromService);
+* } else if (propertyTypeFromService) { // If the service provides us a property type, use it. See below for an explanation of when this will and won't occur.
+* propertyType = propertyTypeFromService; +* } else if (autoResolveProperties) { // If options.autoResolveProperties is set to true +* if (javascript type is string) { // See below for an explanation of how and why autoResolveProperties works as it does. +* propertyType = 'Edm.String'; +* } else if (javascript type is boolean) { +* propertyType = 'Edm.Boolean'; +* } +* } +* +* if (propertyType) { +* // Set the property type on the property. +* } else { +* // Property gets no EdmType. +* } +* ``` +* Notes: +* +* * The service only provides a type if JsonFullMetadata or JsonMinimalMetadata is used, and if the type is Int64, Guid, Binary, or DateTime. +* * Explanation of autoResolveProperties: +* * String gets correctly resolved to 'Edm.String'. +* * Int64, Guid, Binary, and DateTime all get resolved to 'Edm.String.' This only happens if JsonNoMetadata is used (otherwise the service will provide the propertyType in a prior step). +* * Boolean gets correctly resolved to 'Edm.Boolean'. +* * For both Int32 and Double, no type information is returned, even in the case of autoResolveProperties = true. This is due to an +* inability to distinguish between the two in certain cases. 
+* +* @example +* var azure = require('azure-storage'); +* var tableService = azure.createTableService(); +* tableService.retrieveEntity('tasktable', 'tasksSeattle', '1', function(error, serverEntity) { +* if(!error) { +* // Entity available in serverEntity variable +* } +* }); +*/ +TableService.prototype.retrieveEntity = function (table, partitionKey, rowKey, optionsOrCallback, callback) { + var entityDescriptor = { PartitionKey: {_: partitionKey, $: 'Edm.String'}, + RowKey: {_: rowKey, $: 'Edm.String'}, + }; + + validate.validateArgs('retrieveEntity', function (v) { + v.stringAllowEmpty(partitionKey, 'partitionKey'); + v.stringAllowEmpty(rowKey, 'rowKey'); + }); + + this._performEntityOperation(TableConstants.Operations.RETRIEVE, table, entityDescriptor, optionsOrCallback, callback); +}; + +/** +* Inserts a new entity into a table. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} entityDescriptor The entity descriptor. +* @param {object} [options] The request options. +* @param {string} [options.echoContent] Whether or not to return the entity upon a successful insert. Default to false. +* @param {string} [options.payloadFormat] The payload format to use in the response, if options.echoContent is true. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. 
The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {TableService~propertyResolver} [options.propertyResolver] The property resolver. Only applied if echoContent is true. Given the partition key, row key, property name, +* property value, and the property Edm type if given by the service, returns the Edm type of the property. +* @param {Function(entity)} [options.entityResolver] The entity resolver. Only applied if echoContent is true. Given the single entity returned by the insert, returns +* a modified object. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the entity information. +* `response` will contain information related to this operation. +* +* @example +* var azure = require('azure-storage'); +* var tableService = azure.createTableService(); +* var task1 = { +* PartitionKey : {'_': 'tasksSeattle', '$':'Edm.String'}, +* RowKey: {'_': '1', '$':'Edm.String'}, +* Description: {'_': 'Take out the trash', '$':'Edm.String'}, +* DueDate: {'_': new Date(2011, 12, 14, 12), '$':'Edm.DateTime'} +* }; +* tableService.insertEntity('tasktable', task1, function(error) { +* if(!error) { +* // Entity inserted +* } +* }); +*/ +TableService.prototype.insertEntity = function (table, entityDescriptor, optionsOrCallback, callback) { + this._performEntityOperation(TableConstants.Operations.INSERT, table, entityDescriptor, optionsOrCallback, callback); +}; + +/** +* Inserts or updates a new entity into a table. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} entityDescriptor The entity descriptor. 
+* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the entity information. +* `response` will contain information related to this operation. +*/ +TableService.prototype.insertOrReplaceEntity = function (table, entityDescriptor, optionsOrCallback, callback) { + this._performEntityOperation(TableConstants.Operations.INSERT_OR_REPLACE, table, entityDescriptor, optionsOrCallback, callback); +}; + +/** +* Replaces an existing entity within a table. To replace conditionally based on etag, set entity['.metadata']['etag']. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} entityDescriptor The entity descriptor. +* @param {object} [options] The request options. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the entity information. +* `response` will contain information related to this operation. +*/ +TableService.prototype.replaceEntity = function (table, entityDescriptor, optionsOrCallback, callback) { + this._performEntityOperation(TableConstants.Operations.REPLACE, table, entityDescriptor, optionsOrCallback, callback); +}; + +/** +* Updates an existing entity within a table by merging new property values into the entity. To merge conditionally based on etag, set entity['.metadata']['etag']. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} entityDescriptor The entity descriptor. +* @param {object} [options] The request options. 
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values.
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
+* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
+* The maximum execution time interval begins at the time that the client begins building the request. The maximum
+* execution time is checked intermittently while performing requests, and before executing retries.
+* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
+* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
+* The default value is false.
+* @param {errorOrResult} callback `error` will contain information if an error occurs;
+* otherwise `result` will contain the entity information.
+* `response` will contain information related to this operation.
+*/
+TableService.prototype.mergeEntity = function (table, entityDescriptor, optionsOrCallback, callback) {
+  this._performEntityOperation(TableConstants.Operations.MERGE, table, entityDescriptor, optionsOrCallback, callback);
+};
+
+/**
+* Inserts or updates an existing entity within a table by merging new property values into the entity.
+*
+* @this {TableService}
+* @param {string} table The table name.
+* @param {object} entityDescriptor The entity descriptor.
+* @param {object} [options] The request options.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
+* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain the entity information. +* `response` will contain information related to this operation. +*/ +TableService.prototype.insertOrMergeEntity = function (table, entityDescriptor, optionsOrCallback, callback) { + this._performEntityOperation(TableConstants.Operations.INSERT_OR_MERGE, table, entityDescriptor, optionsOrCallback, callback); +}; + +/** +* Deletes an entity within a table. To delete conditionally based on etag, set entity['.metadata']['etag']. +* +* @this {TableService} +* @param {string} table The table name. +* @param {object} entityDescriptor The entity descriptor. +* @param {object} [options] The request options. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. 
+* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResponse} callback `error` will contain information if an error occurs; +* `response` will contain information related to this operation. +*/ +TableService.prototype.deleteEntity = function (table, entityDescriptor, optionsOrCallback, callback) { + this._performEntityOperation(TableConstants.Operations.DELETE, table, entityDescriptor, optionsOrCallback, callback); +}; + +/** +* Executes the operations in the batch. +* +* @this {TableService} +* @param {string} table The table name. +* @param {TableBatch} batch The table batch to execute. +* @param {object} [options] The create options or callback function. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. 
+* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `result` will contain responses for each operation executed in the batch; +* `result.entity` will contain the entity information for each operation executed. +* `result.response` will contain the response for each operations executed. +* `response` will contain information related to this operation. 
+*/
+TableService.prototype.executeBatch = function (table, batch, optionsOrCallback, callback) {
+  var userOptions;
+  azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
+
+  validate.validateArgs('executeBatch', function (v) {
+    v.string(table, 'table');
+    v.tableNameIsValid(table);
+    v.object(batch, 'batch');
+    v.callback(callback);
+  });
+
+  if(!batch.hasOperations()) {
+    throw new Error(SR.EMPTY_BATCH);
+  }
+
+  var options = extend(true, {}, userOptions);
+
+  var batchResult = new BatchResult(this, table, batch.operations);
+  var webResource = batchResult.constructWebResource();
+
+  var body = batchResult.serialize();
+  webResource.withBody(body);
+  webResource.withHeader(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(body, 'utf8'));
+
+  var processResponseCallback = function (responseObject, next) {
+    var responseObjects = batchResult.parse(responseObject);
+
+    var noError = true;
+    // if the batch was unsuccessful, there will be a single response indicating the error
+    if (responseObjects && responseObjects.length > 0) {
+      responseObjects.forEach(function(item){
+        if(noError && !item.response.isSuccessful){
+          responseObject = item;
+          noError = false;
+        }
+      });
+    }
+
+    if (noError) {
+      responseObject.operationResponses = responseObjects;
+    }
+
+    var finalCallback = function (returnObject) {
+      // perform final callback
+      callback(returnObject.error, returnObject.operationResponses, returnObject.response);
+    };
+
+    next(responseObject, finalCallback);
+  };
+
+  this.performRequest(webResource, webResource.body, options, processResponseCallback);
+};
+
+// Private methods
+
+/**
+* Checks whether or not a table exists on the service.
+* @ignore
+*
+* @this {TableService}
+* @param {string} table The table name.
+* @param {string} primaryOnly If true, the request will be executed against the primary storage location.
+* @param {object} [options] The request options.
+* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {Function(error, result, response)} callback `error` will contain information if an error occurs; +* otherwise `result` will contain +* the table information including `exists` boolean member. +* `response` will contain information related to this operation. 
+*/ +TableService.prototype._doesTableExist = function (table, primaryOnly, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('doesTableExist', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + + if(primaryOnly === false) { + options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY; + } + + var webResource = WebResource.get('Tables(\'' + table + '\')'); + webResource.withHeader(HeaderConstants.ACCEPT, this.defaultPayloadFormat); + + var processResponseCallback = function (responseObject, next) { + responseObject.tableResult = {}; + responseObject.tableResult.isSuccessful = responseObject.error ? false : true; + responseObject.tableResult.statusCode = responseObject.response === null || responseObject.response === undefined ? undefined : responseObject.response.statusCode; + responseObject.tableResult.TableName = table; + + if(!responseObject.error){ + responseObject.tableResult.exists = true; + } else if (responseObject.error && responseObject.error.statusCode === Constants.HttpConstants.HttpResponseCodes.NotFound) { + responseObject.error = null; + responseObject.tableResult.exists = false; + responseObject.response.isSuccessful = true; + } + + var finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.tableResult, returnObject.response); + }; + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, null, options, processResponseCallback); +}; + +/** +* Performs a table operation. +* +* @this {TableService} +* @param {string} operation The operation to perform. +* @param {string} table The table name. +* @param {object} entityDescriptor The entity descriptor. +* @param {object} [options] The create options or callback function. 
+* @param {string} [options.echoContent] Whether or not to return the entity upon a successful insert. Default to false. +* @param {string} [options.payloadFormat] The payload format to use for the request. +* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to. +* Please see StorageUtilities.LocationMode for the possible values. +* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request. +* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request. +* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request. +* The maximum execution time interval begins at the time that the client begins building the request. The maximum +* execution time is checked intermittently while performing requests, and before executing retries. +* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit. +* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false. +* The default value is false. +* @param {errorOrResult} callback `error` will contain information if an error occurs; +* otherwise `entity` will contain the entity information. +* `response` will contain information related to this operation. 
+* @ignore +*/ +TableService.prototype._performEntityOperation = function (operation, table, entityDescriptor, optionsOrCallback, callback) { + var userOptions; + azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; }); + + validate.validateArgs('entityOperation', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + v.object(entityDescriptor, 'entityDescriptor'); + + if(typeof entityDescriptor.PartitionKey !== 'string') { + v.object(entityDescriptor.PartitionKey, 'entityDescriptor.PartitionKey'); + v.stringAllowEmpty(entityDescriptor.PartitionKey._, 'entityDescriptor.PartitionKey._'); + } + + if(typeof entityDescriptor.RowKey !== 'string') { + v.object(entityDescriptor.RowKey, 'entityDescriptor.RowKey'); + v.stringAllowEmpty(entityDescriptor.RowKey._, 'entityDescriptor.RowKey._'); + } + v.callback(callback); + }); + + var options = extend(true, {}, userOptions); + options.payloadFormat = options.payloadFormat || this.defaultPayloadFormat; + + var webResource = RequestHandler.constructEntityWebResource(operation, table, entityDescriptor, options); + + var processResponseCallback = function (responseObject, next) { + var finalCallback; + if (operation === TableConstants.Operations.DELETE) { + finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.response); + }; + } else { + responseObject.entityResponse = null; + if (!responseObject.error) { + responseObject.entityResponse = entityResult.parseEntity(responseObject.response, options.autoResolveProperties, options.propertyResolver, options.entityResolver); + } + + finalCallback = function (returnObject) { + callback(returnObject.error, returnObject.entityResponse, returnObject.response); + }; + } + + next(responseObject, finalCallback); + }; + + this.performRequest(webResource, webResource.body, options, processResponseCallback); +}; + +/** +* Retrieves a table URL. +* +* @param {string} table The table name. 
+* @param {string} [sasToken] The Shared Access Signature token. +* @param {boolean} [primary] A boolean representing whether to use the primary or the secondary endpoint. +* @return {string} The formatted URL string. +* @example +* var azure = require('azure-storage'); +* var tableService = azure.createTableService(); +* var sharedAccessPolicy = { +* AccessPolicy: { +* Permissions: azure.TableUtilities.SharedAccessPermissions.QUERY, +* Start: startDate, +* Expiry: expiryDate +* }, +* }; +* +* var sasToken = tableService.generateSharedAccessSignature(table, sharedAccessPolicy); +* var sasUrl = tableService.getUrl(table, sasToken); +*/ +TableService.prototype.getUrl = function (table, sasToken, primary) { + validate.validateArgs('getUrl', function (v) { + v.string(table, 'table'); + v.tableNameIsValid(table); + }); + + return this._getUrl(table, sasToken, primary); +}; + +/** +* Given the partition key, row key, property name, property value, +* and the property Edm type if given by the service, returns the Edm type of the property. +* @typedef {function} TableService~propertyResolver +* @param {object} pk The partition key. +* @param {object} rk The row key. +* @param {string} name The property name. +* @param {object} value The property value. +* @param {string} type The EDM type. +*/ + +/** +* Returns entities matched by a query. +* @callback TableService~queryResponse +* @param {object} error If an error occurs, the error information. +* @param {object} entries The entities returned by the query. +* @param {object} queryResultContinuation If more matching entities exist, and could not be returned, +* a continuation token that can be used to retrieve more results. +* @param {object} response Information related to this operation. +*/ + +module.exports = TableService; + + +/***/ }), + +/***/ 581: +/***/ ((module, exports) => { + +// +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Expose 'HeaderConstants'. +exports = module.exports; + +/** +* Defines constants, enums, and utility functions for use with the Table service. +* @namespace TableUtilities +*/ +var TableUtilities = { + /** + * Permission types. + * + * @const + * @enum {string} + */ + SharedAccessPermissions: { + QUERY: 'r', + ADD: 'a', + UPDATE: 'u', + DELETE: 'd' + }, + + /** + * Payload Format. + * + * @const + * @enum {string} + */ + PayloadFormat: { + FULL_METADATA: 'application/json;odata=fullmetadata', + MINIMAL_METADATA: 'application/json;odata=minimalmetadata', + NO_METADATA: 'application/json;odata=nometadata' + }, + + /** + * Defines the set of Boolean operators for constructing queries. + * + * @const + * @enum {string} + */ + TableOperators: { + AND: 'and', + NOT: 'not', + OR: 'or' + }, + + /** + * Filter property comparison operators. + * + * @const + * @enum {string} + */ + QueryComparisons: { + EQUAL: 'eq', + NOT_EQUAL: 'ne', + GREATER_THAN: 'gt', + GREATER_THAN_OR_EQUAL: 'ge', + LESS_THAN: 'lt', + LESS_THAN_OR_EQUAL: 'le' + }, + + /** + * Edm types. + * + * @const + * @enum {string} + */ + EdmType: { + STRING: 'Edm.String', + BINARY: 'Edm.Binary', + INT64: 'Edm.Int64', + INT32: 'Edm.Int32', + DOUBLE: 'Edm.Double', + DATETIME: 'Edm.DateTime', + GUID: 'Edm.Guid', + BOOLEAN: 'Edm.Boolean' + }, + + /** + * A helper to create table entities. 
+ * + * @example + * var entGen = TableUtilities.entityGenerator; + * var entity = { PartitionKey: entGen.String('part2'), + * RowKey: entGen.String('row1'), + * boolValue: entGen.Boolean(true), + * intValue: entGen.Int32(42), + * dateValue: entGen.DateTime(new Date(Date.UTC(2011, 10, 25))), + * }; + */ + entityGenerator: (function() + { + var EntityProperty = function (value, type) { + var entityProperty = { _:value}; + if (type) { + entityProperty['$'] = type; + } + return entityProperty; + }; + + return { + EntityProperty : EntityProperty, + + Int32 : function(value) { + return new EntityProperty(value, 'Edm.Int32'); + }, + + Int64 : function(value) { + return new EntityProperty(value, 'Edm.Int64'); + }, + + Binary : function(value) { + return new EntityProperty(value, 'Edm.Binary'); + }, + + Boolean : function(value) { + return new EntityProperty(value, 'Edm.Boolean'); + }, + + String : function(value) { + return new EntityProperty(value, 'Edm.String'); + }, + + Guid : function(value) { + return new EntityProperty(value, 'Edm.Guid'); + }, + + Double : function(value) { + return new EntityProperty(value, 'Edm.Double'); + }, + + DateTime : function(value) { + return new EntityProperty(value, 'Edm.DateTime'); + } + }; + })() +}; + +module.exports = TableUtilities; + +/***/ }), + +/***/ 69861: +/***/ ((__unused_webpack_module, exports, __nccwpck_require__) => { + +// wrapper for non-node envs +;(function (sax) { + +sax.parser = function (strict, opt) { return new SAXParser(strict, opt) } +sax.SAXParser = SAXParser +sax.SAXStream = SAXStream +sax.createStream = createStream + +// When we pass the MAX_BUFFER_LENGTH position, start checking for buffer overruns. +// When we check, schedule the next check for MAX_BUFFER_LENGTH - (max(buffer lengths)), +// since that's the earliest that a buffer overrun could occur. This way, checks are +// as rare as required, but as often as necessary to ensure never crossing this bound. 
+// Furthermore, buffers are only tested at most once per write(), so passing a very +// large string into write() might have undesirable effects, but this is manageable by +// the caller, so it is assumed to be safe. Thus, a call to write() may, in the extreme +// edge case, result in creating at most one complete copy of the string passed in. +// Set to Infinity to have unlimited buffers. +sax.MAX_BUFFER_LENGTH = 64 * 1024 + +var buffers = [ + "comment", "sgmlDecl", "textNode", "tagName", "doctype", + "procInstName", "procInstBody", "entity", "attribName", + "attribValue", "cdata", "script" +] + +sax.EVENTS = // for discoverability. + [ "text" + , "processinginstruction" + , "sgmldeclaration" + , "doctype" + , "comment" + , "attribute" + , "opentag" + , "closetag" + , "opencdata" + , "cdata" + , "closecdata" + , "error" + , "end" + , "ready" + , "script" + , "opennamespace" + , "closenamespace" + ] + +function SAXParser (strict, opt) { + if (!(this instanceof SAXParser)) return new SAXParser(strict, opt) + + var parser = this + clearBuffers(parser) + parser.q = parser.c = "" + parser.bufferCheckPosition = sax.MAX_BUFFER_LENGTH + parser.opt = opt || {} + parser.opt.lowercase = parser.opt.lowercase || parser.opt.lowercasetags + parser.looseCase = parser.opt.lowercase ? "toLowerCase" : "toUpperCase" + parser.tags = [] + parser.closed = parser.closedRoot = parser.sawRoot = false + parser.tag = parser.error = null + parser.strict = !!strict + parser.noscript = !!(strict || parser.opt.noscript) + parser.state = S.BEGIN + parser.ENTITIES = Object.create(sax.ENTITIES) + parser.attribList = [] + + // namespaces form a prototype chain. + // it always points at the current tag, + // which protos to its parent tag. 
+ if (parser.opt.xmlns) parser.ns = Object.create(rootNS) + + // mostly just for error reporting + parser.trackPosition = parser.opt.position !== false + if (parser.trackPosition) { + parser.position = parser.line = parser.column = 0 + } + emit(parser, "onready") +} + +if (!Object.create) Object.create = function (o) { + function f () { this.__proto__ = o } + f.prototype = o + return new f +} + +if (!Object.getPrototypeOf) Object.getPrototypeOf = function (o) { + return o.__proto__ +} + +if (!Object.keys) Object.keys = function (o) { + var a = [] + for (var i in o) if (o.hasOwnProperty(i)) a.push(i) + return a +} + +function checkBufferLength (parser) { + var maxAllowed = Math.max(sax.MAX_BUFFER_LENGTH, 10) + , maxActual = 0 + for (var i = 0, l = buffers.length; i < l; i ++) { + var len = parser[buffers[i]].length + if (len > maxAllowed) { + // Text/cdata nodes can get big, and since they're buffered, + // we can get here under normal conditions. + // Avoid issues by emitting the text node now, + // so at least it won't get any bigger. + switch (buffers[i]) { + case "textNode": + closeText(parser) + break + + case "cdata": + emitNode(parser, "oncdata", parser.cdata) + parser.cdata = "" + break + + case "script": + emitNode(parser, "onscript", parser.script) + parser.script = "" + break + + default: + error(parser, "Max buffer length exceeded: "+buffers[i]) + } + } + maxActual = Math.max(maxActual, len) + } + // schedule the next check for the earliest possible buffer overrun. 
+ parser.bufferCheckPosition = (sax.MAX_BUFFER_LENGTH - maxActual) + + parser.position +} + +function clearBuffers (parser) { + for (var i = 0, l = buffers.length; i < l; i ++) { + parser[buffers[i]] = "" + } +} + +function flushBuffers (parser) { + closeText(parser) + if (parser.cdata !== "") { + emitNode(parser, "oncdata", parser.cdata) + parser.cdata = "" + } + if (parser.script !== "") { + emitNode(parser, "onscript", parser.script) + parser.script = "" + } +} + +SAXParser.prototype = + { end: function () { end(this) } + , write: write + , resume: function () { this.error = null; return this } + , close: function () { return this.write(null) } + , flush: function () { flushBuffers(this) } + } + +try { + var Stream = (__nccwpck_require__(12781).Stream) +} catch (ex) { + var Stream = function () {} +} + + +var streamWraps = sax.EVENTS.filter(function (ev) { + return ev !== "error" && ev !== "end" +}) + +function createStream (strict, opt) { + return new SAXStream(strict, opt) +} + +function SAXStream (strict, opt) { + if (!(this instanceof SAXStream)) return new SAXStream(strict, opt) + + Stream.apply(this) + + this._parser = new SAXParser(strict, opt) + this.writable = true + this.readable = true + + + var me = this + + this._parser.onend = function () { + me.emit("end") + } + + this._parser.onerror = function (er) { + me.emit("error", er) + + // if didn't throw, then means error was handled. + // go ahead and clear error, so we can write again. 
+ me._parser.error = null + } + + this._decoder = null; + + streamWraps.forEach(function (ev) { + Object.defineProperty(me, "on" + ev, { + get: function () { return me._parser["on" + ev] }, + set: function (h) { + if (!h) { + me.removeAllListeners(ev) + return me._parser["on"+ev] = h + } + me.on(ev, h) + }, + enumerable: true, + configurable: false + }) + }) +} + +SAXStream.prototype = Object.create(Stream.prototype, + { constructor: { value: SAXStream } }) + +SAXStream.prototype.write = function (data) { + if (typeof Buffer === 'function' && + typeof Buffer.isBuffer === 'function' && + Buffer.isBuffer(data)) { + if (!this._decoder) { + var SD = (__nccwpck_require__(71576).StringDecoder) + this._decoder = new SD('utf8') + } + data = this._decoder.write(data); + } + + this._parser.write(data.toString()) + this.emit("data", data) + return true +} + +SAXStream.prototype.end = function (chunk) { + if (chunk && chunk.length) this.write(chunk) + this._parser.end() + return true +} + +SAXStream.prototype.on = function (ev, handler) { + var me = this + if (!me._parser["on"+ev] && streamWraps.indexOf(ev) !== -1) { + me._parser["on"+ev] = function () { + var args = arguments.length === 1 ? [arguments[0]] + : Array.apply(null, arguments) + args.splice(0, 0, ev) + me.emit.apply(me, args) + } + } + + return Stream.prototype.on.call(me, ev, handler) +} + + + +// character classes and tokens +var whitespace = "\r\n\t " + // this really needs to be replaced with character classes. + // XML allows all manner of ridiculous numbers and digits. 
+ , number = "0124356789" + , letter = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + // (Letter | "_" | ":") + , quote = "'\"" + , entity = number+letter+"#" + , attribEnd = whitespace + ">" + , CDATA = "[CDATA[" + , DOCTYPE = "DOCTYPE" + , XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace" + , XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/" + , rootNS = { xml: XML_NAMESPACE, xmlns: XMLNS_NAMESPACE } + +// turn all the string character sets into character class objects. +whitespace = charClass(whitespace) +number = charClass(number) +letter = charClass(letter) + +// http://www.w3.org/TR/REC-xml/#NT-NameStartChar +// This implementation works on strings, a single character at a time +// as such, it cannot ever support astral-plane characters (10000-EFFFF) +// without a significant breaking change to either this parser, or the +// JavaScript language. Implementation of an emoji-capable xml parser +// is left as an exercise for the reader. +var nameStart = /[:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]/ + +var nameBody = /[:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u00B7\u0300-\u036F\u203F-\u2040\.\d-]/ + +quote = charClass(quote) +entity = charClass(entity) +attribEnd = charClass(attribEnd) + +function charClass (str) { + return str.split("").reduce(function (s, c) { + s[c] = true + return s + }, {}) +} + +function isRegExp (c) { + return Object.prototype.toString.call(c) === '[object RegExp]' +} + +function is (charclass, c) { + return isRegExp(charclass) ? !!c.match(charclass) : charclass[c] +} + +function not (charclass, c) { + return !is(charclass, c) +} + +var S = 0 +sax.STATE = +{ BEGIN : S++ +, TEXT : S++ // general stuff +, TEXT_ENTITY : S++ // & and such. +, OPEN_WAKA : S++ // < +, SGML_DECL : S++ // +, SCRIPT : S++ //