Add custom chunked upload handlers
This commit is contained in:
parent
7a8061adec
commit
1769fb1498
@ -335,7 +335,6 @@ func quickSetup(flags *pflag.FlagSet, d pythonData) {
|
|||||||
AuthMethod: "",
|
AuthMethod: "",
|
||||||
Branding: settings.Branding{},
|
Branding: settings.Branding{},
|
||||||
Tus: settings.Tus{
|
Tus: settings.Tus{
|
||||||
Enabled: true,
|
|
||||||
ChunkSize: settings.DefaultTusChunkSize,
|
ChunkSize: settings.DefaultTusChunkSize,
|
||||||
RetryCount: settings.DefaultTusRetryCount,
|
RetryCount: settings.DefaultTusRetryCount,
|
||||||
},
|
},
|
||||||
|
|||||||
@ -2,6 +2,7 @@ import * as tus from "tus-js-client";
|
|||||||
import { tusEndpoint, tusSettings } from "@/utils/constants";
|
import { tusEndpoint, tusSettings } from "@/utils/constants";
|
||||||
import store from "@/store";
|
import store from "@/store";
|
||||||
import { removePrefix } from "@/api/utils";
|
import { removePrefix } from "@/api/utils";
|
||||||
|
import { fetchURL } from "./utils";
|
||||||
|
|
||||||
const RETRY_BASE_DELAY = 1000;
|
const RETRY_BASE_DELAY = 1000;
|
||||||
const RETRY_MAX_DELAY = 20000;
|
const RETRY_MAX_DELAY = 20000;
|
||||||
@ -12,25 +13,20 @@ export async function upload(url, content = "", overwrite = false, onupload) {
|
|||||||
throw new Error("Tus.io settings are not defined");
|
throw new Error("Tus.io settings are not defined");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
url = removePrefix(url);
|
||||||
|
let resourceUrl = `${tusEndpoint}${url}?override=${overwrite}`;
|
||||||
|
|
||||||
|
await createUpload(resourceUrl);
|
||||||
|
|
||||||
return new Promise((resolve, reject) => {
|
return new Promise((resolve, reject) => {
|
||||||
const metadata = {
|
let upload = new tus.Upload(content, {
|
||||||
overwrite: overwrite.toString(),
|
uploadUrl: resourceUrl,
|
||||||
// url is URI encoded and needs to be decoded for metadata first
|
|
||||||
destination: decodeURIComponent(removePrefix(url)),
|
|
||||||
};
|
|
||||||
var upload = new tus.Upload(content, {
|
|
||||||
endpoint: tusEndpoint,
|
|
||||||
chunkSize: tusSettings.chunkSize,
|
chunkSize: tusSettings.chunkSize,
|
||||||
retryDelays: computeRetryDelays(tusSettings),
|
retryDelays: computeRetryDelays(tusSettings),
|
||||||
parallelUploads: 1,
|
parallelUploads: 1,
|
||||||
|
storeFingerprintForResuming: false,
|
||||||
headers: {
|
headers: {
|
||||||
"X-Auth": store.state.jwt,
|
"X-Auth": store.state.jwt,
|
||||||
// Send the metadata with every request
|
|
||||||
// If we used the tus client's metadata option, it would only be sent
|
|
||||||
// with some of the requests.
|
|
||||||
"Upload-Metadata": Object.entries(metadata)
|
|
||||||
.map(([key, value]) => `${key} ${btoa(value)}`)
|
|
||||||
.join(","),
|
|
||||||
},
|
},
|
||||||
onError: function (error) {
|
onError: function (error) {
|
||||||
reject("Upload failed: " + error);
|
reject("Upload failed: " + error);
|
||||||
@ -43,22 +39,24 @@ export async function upload(url, content = "", overwrite = false, onupload) {
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
onSuccess: function () {
|
onSuccess: function () {
|
||||||
// Remove the upload from the storage when completed.
|
|
||||||
// Otherwise, old storage keys aren't overwritten, which
|
|
||||||
// lets resumable uploads fail.
|
|
||||||
upload._removeFromUrlStorage();
|
|
||||||
resolve();
|
resolve();
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
upload.findPreviousUploads().then(function (previousUploads) {
|
|
||||||
if (previousUploads.length) {
|
|
||||||
upload.resumeFromPreviousUpload(previousUploads[0]);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
upload.start();
|
upload.start();
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async function createUpload(resourceUrl) {
|
||||||
|
let headResp = await fetchURL(resourceUrl, {
|
||||||
|
method: "POST",
|
||||||
|
});
|
||||||
|
if (headResp.status !== 201) {
|
||||||
|
throw new Error(
|
||||||
|
`Failed to create an upload: ${headResp.status} ${headResp.statusText}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
function computeRetryDelays(tusSettings) {
|
function computeRetryDelays(tusSettings) {
|
||||||
if (!tusSettings.retryCount || tusSettings.retryCount < 1) {
|
if (!tusSettings.retryCount || tusSettings.retryCount < 1) {
|
||||||
// Disable retries altogether
|
// Disable retries altogether
|
||||||
@ -79,16 +77,7 @@ function computeRetryDelays(tusSettings) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
export async function useTus(content) {
|
export async function useTus(content) {
|
||||||
if (!isTusSupported() || !(content instanceof Blob)) {
|
return isTusSupported() && content instanceof Blob;
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
// use tus if tus uploads are enabled and the content's size is larger than chunkSize
|
|
||||||
return (
|
|
||||||
tusSettings &&
|
|
||||||
tusSettings.enabled === true &&
|
|
||||||
content.size > tusSettings.chunkSize
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function isTusSupported() {
|
function isTusSupported() {
|
||||||
|
|||||||
@ -17,6 +17,14 @@
|
|||||||
>
|
>
|
||||||
{{ $t("buttons.cancel") }}
|
{{ $t("buttons.cancel") }}
|
||||||
</button>
|
</button>
|
||||||
|
<button
|
||||||
|
class="button button--flat button--blue"
|
||||||
|
@click="showAction"
|
||||||
|
:aria-label="$t('buttons.continue')"
|
||||||
|
:title="$t('buttons.continue')"
|
||||||
|
>
|
||||||
|
{{ $t("buttons.continue") }}
|
||||||
|
</button>
|
||||||
<button
|
<button
|
||||||
class="button button--flat button--red"
|
class="button button--flat button--red"
|
||||||
@click="showConfirm"
|
@click="showConfirm"
|
||||||
@ -34,6 +42,6 @@ import { mapState } from "vuex";
|
|||||||
|
|
||||||
export default {
|
export default {
|
||||||
name: "replace",
|
name: "replace",
|
||||||
computed: mapState(["showConfirm"]),
|
computed: mapState(["showConfirm", "showAction"]),
|
||||||
};
|
};
|
||||||
</script>
|
</script>
|
||||||
|
|||||||
@ -37,7 +37,8 @@
|
|||||||
"toggleSidebar": "Toggle sidebar",
|
"toggleSidebar": "Toggle sidebar",
|
||||||
"update": "Update",
|
"update": "Update",
|
||||||
"upload": "Upload",
|
"upload": "Upload",
|
||||||
"openFile": "Open file"
|
"openFile": "Open file",
|
||||||
|
"continue": "Continue"
|
||||||
},
|
},
|
||||||
"download": {
|
"download": {
|
||||||
"downloadFile": "Download File",
|
"downloadFile": "Download File",
|
||||||
@ -148,7 +149,7 @@
|
|||||||
"rename": "Rename",
|
"rename": "Rename",
|
||||||
"renameMessage": "Insert a new name for",
|
"renameMessage": "Insert a new name for",
|
||||||
"replace": "Replace",
|
"replace": "Replace",
|
||||||
"replaceMessage": "One of the files you're trying to upload is conflicting because of its name. Do you wish to replace the existing one?\n",
|
"replaceMessage": "One of the files you're trying to upload is conflicting because of its name. Do you wish to continue to upload or replace the existing one?\n",
|
||||||
"schedule": "Schedule",
|
"schedule": "Schedule",
|
||||||
"scheduleMessage": "Pick a date and time to schedule the publication of this post.",
|
"scheduleMessage": "Pick a date and time to schedule the publication of this post.",
|
||||||
"show": "Show",
|
"show": "Show",
|
||||||
@ -186,10 +187,9 @@
|
|||||||
"commandsUpdated": "Commands updated!",
|
"commandsUpdated": "Commands updated!",
|
||||||
"createUserDir": "Auto create user home dir while adding new user",
|
"createUserDir": "Auto create user home dir while adding new user",
|
||||||
"tusUploads": "Chunked Uploads",
|
"tusUploads": "Chunked Uploads",
|
||||||
"tusUploadsHelp": "File Browser supports the tus.io protocol for resumable file uploads, allowing for the creation of efficient, reliable, resumable and chunked file uploads even on unreliable networks.",
|
"tusUploadsHelp": "File Browser supports chunked file uploads, allowing for the creation of efficient, reliable, resumable and chunked file uploads even on unreliable networks.",
|
||||||
"tusUploadsEnabled": "Enable chunked file uploads",
|
"tusUploadsChunkSize": "Indicates to maximum size of a request (direct uploads will be used for smaller uploads). You may input a plain integer denoting a bytes input or a string like 10MB, 1GB etc.",
|
||||||
"tusUploadsChunkSize": "Indicates to maximum size of a request (direct uploads will be used for smaller uploads). You may input a plain integer denoting a bytes input or a string like 10MB, 1.00Ti etc.",
|
"tusUploadsRetryCount": "Number of retries to perform if a chunk fails to upload.",
|
||||||
"tusUploadsRetryCount": "Number of times to retry a failed upload (set to 0 to disable retries)",
|
|
||||||
"userHomeBasePath": "Base path for user home directories",
|
"userHomeBasePath": "Base path for user home directories",
|
||||||
"userScopeGenerationPlaceholder": "The scope will be auto generated",
|
"userScopeGenerationPlaceholder": "The scope will be auto generated",
|
||||||
"createUserHomeDirectory": "Create user home directory",
|
"createUserHomeDirectory": "Create user home directory",
|
||||||
|
|||||||
@ -23,6 +23,7 @@ const state = {
|
|||||||
show: null,
|
show: null,
|
||||||
showShell: false,
|
showShell: false,
|
||||||
showConfirm: null,
|
showConfirm: null,
|
||||||
|
showAction: null,
|
||||||
};
|
};
|
||||||
|
|
||||||
export default new Vuex.Store({
|
export default new Vuex.Store({
|
||||||
|
|||||||
@ -5,6 +5,7 @@ const mutations = {
|
|||||||
closeHovers: (state) => {
|
closeHovers: (state) => {
|
||||||
state.show = null;
|
state.show = null;
|
||||||
state.showConfirm = null;
|
state.showConfirm = null;
|
||||||
|
state.showAction = null;
|
||||||
},
|
},
|
||||||
toggleShell: (state) => {
|
toggleShell: (state) => {
|
||||||
state.showShell = !state.showShell;
|
state.showShell = !state.showShell;
|
||||||
@ -17,6 +18,9 @@ const mutations = {
|
|||||||
|
|
||||||
state.show = value.prompt;
|
state.show = value.prompt;
|
||||||
state.showConfirm = value.confirm;
|
state.showConfirm = value.confirm;
|
||||||
|
if (value.action !== undefined) {
|
||||||
|
state.showAction = value.action;
|
||||||
|
}
|
||||||
},
|
},
|
||||||
showError: (state) => {
|
showError: (state) => {
|
||||||
state.show = "error";
|
state.show = "error";
|
||||||
|
|||||||
@ -694,6 +694,11 @@ export default {
|
|||||||
if (conflict) {
|
if (conflict) {
|
||||||
this.$store.commit("showHover", {
|
this.$store.commit("showHover", {
|
||||||
prompt: "replace",
|
prompt: "replace",
|
||||||
|
action: (event) => {
|
||||||
|
event.preventDefault();
|
||||||
|
this.$store.commit("closeHovers");
|
||||||
|
upload.handleFiles(files, path, false);
|
||||||
|
},
|
||||||
confirm: (event) => {
|
confirm: (event) => {
|
||||||
event.preventDefault();
|
event.preventDefault();
|
||||||
this.$store.commit("closeHovers");
|
this.$store.commit("closeHovers");
|
||||||
@ -729,6 +734,11 @@ export default {
|
|||||||
if (conflict) {
|
if (conflict) {
|
||||||
this.$store.commit("showHover", {
|
this.$store.commit("showHover", {
|
||||||
prompt: "replace",
|
prompt: "replace",
|
||||||
|
action: (event) => {
|
||||||
|
event.preventDefault();
|
||||||
|
this.$store.commit("closeHovers");
|
||||||
|
upload.handleFiles(files, path, false);
|
||||||
|
},
|
||||||
confirm: (event) => {
|
confirm: (event) => {
|
||||||
event.preventDefault();
|
event.preventDefault();
|
||||||
this.$store.commit("closeHovers");
|
this.$store.commit("closeHovers");
|
||||||
|
|||||||
@ -106,38 +106,31 @@
|
|||||||
|
|
||||||
<p class="small">{{ $t("settings.tusUploadsHelp") }}</p>
|
<p class="small">{{ $t("settings.tusUploadsHelp") }}</p>
|
||||||
|
|
||||||
<p>
|
|
||||||
<input
|
|
||||||
type="checkbox"
|
|
||||||
v-model="settings.tus.enabled"
|
|
||||||
id="tus-enabled"
|
|
||||||
/>
|
|
||||||
{{ $t("settings.tusUploadsEnabled") }}
|
|
||||||
</p>
|
|
||||||
|
|
||||||
<div class="tusConditionalSettings">
|
<div class="tusConditionalSettings">
|
||||||
<label for="tus-chunkSize">{{
|
<p>
|
||||||
$t("settings.tusUploadsChunkSize")
|
<label for="tus-chunkSize">{{
|
||||||
}}</label>
|
$t("settings.tusUploadsChunkSize")
|
||||||
<input
|
}}</label>
|
||||||
class="input input--block"
|
<input
|
||||||
type="text"
|
class="input input--block"
|
||||||
v-model="formattedChunkSize"
|
type="text"
|
||||||
id="tus-chunkSize"
|
v-model="formattedChunkSize"
|
||||||
v-bind:disabled="!settings.tus.enabled"
|
id="tus-chunkSize"
|
||||||
/>
|
/>
|
||||||
|
</p>
|
||||||
|
|
||||||
<label for="tus-retryCount">{{
|
<p>
|
||||||
$t("settings.tusUploadsRetryCount")
|
<label for="tus-retryCount">{{
|
||||||
}}</label>
|
$t("settings.tusUploadsRetryCount")
|
||||||
<input
|
}}</label>
|
||||||
class="input input--block"
|
<input
|
||||||
type="number"
|
class="input input--block"
|
||||||
v-model.number="settings.tus.retryCount"
|
type="number"
|
||||||
id="tus-retryCount"
|
v-model.number="settings.tus.retryCount"
|
||||||
v-bind:disabled="!settings.tus.enabled"
|
id="tus-retryCount"
|
||||||
min="0"
|
min="0"
|
||||||
/>
|
/>
|
||||||
|
</p>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
@ -334,36 +327,35 @@ export default {
|
|||||||
},
|
},
|
||||||
// Parse the user-friendly input (e.g., "20M" or "1T") to bytes
|
// Parse the user-friendly input (e.g., "20M" or "1T") to bytes
|
||||||
parseBytes(input) {
|
parseBytes(input) {
|
||||||
const regex = /^(\d+\.?\d*)([BKMGT]?[B|I]?)$/i;
|
const regex = /^(\d+)(\.\d+)?(B|K|KB|M|MB|G|GB|T|TB)?$/i;
|
||||||
const matches = input.match(regex);
|
const matches = input.match(regex);
|
||||||
if (matches) {
|
if (matches) {
|
||||||
const size = parseFloat(matches[1]);
|
const size = parseFloat(matches[1].concat(matches[2] || ""));
|
||||||
const unit = matches[2].toUpperCase();
|
let unit = matches[3].toUpperCase();
|
||||||
|
if (!unit.endsWith("B")) {
|
||||||
|
unit += "B";
|
||||||
|
}
|
||||||
const units = {
|
const units = {
|
||||||
KB: 1e3,
|
KB: 1024,
|
||||||
MB: 1e6,
|
MB: 1024 ** 2,
|
||||||
GB: 1e9,
|
GB: 1024 ** 3,
|
||||||
TB: 1e12,
|
TB: 1024 ** 4,
|
||||||
KI: 1024,
|
|
||||||
MI: 1024 ** 2,
|
|
||||||
GI: 1024 ** 3,
|
|
||||||
TI: 1024 ** 4,
|
|
||||||
};
|
};
|
||||||
return size * (units[unit] || 1);
|
return size * (units[unit] || 1);
|
||||||
} else {
|
} else {
|
||||||
return 0;
|
return 1024 ** 2;
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
// Format the chunk size in bytes to user-friendly format
|
// Format the chunk size in bytes to user-friendly format
|
||||||
formatBytes(bytes) {
|
formatBytes(bytes) {
|
||||||
const units = ["B", "Ki", "Mi", "Gi", "Ti"];
|
const units = ["B", "KB", "MB", "GB", "TB"];
|
||||||
let size = bytes;
|
let size = bytes;
|
||||||
let unitIndex = 0;
|
let unitIndex = 0;
|
||||||
while (size >= 1024 && unitIndex < units.length - 1) {
|
while (size >= 1024 && unitIndex < units.length - 1) {
|
||||||
size /= 1024;
|
size /= 1024;
|
||||||
unitIndex++;
|
unitIndex++;
|
||||||
}
|
}
|
||||||
return `${size.toFixed(2)}${units[unitIndex]}`;
|
return `${size}${units[unitIndex]}`;
|
||||||
},
|
},
|
||||||
// Clear the debounce timeout when the component is destroyed
|
// Clear the debounce timeout when the component is destroyed
|
||||||
beforeDestroy() {
|
beforeDestroy() {
|
||||||
|
|||||||
18
go.mod
18
go.mod
@ -1,6 +1,6 @@
|
|||||||
module github.com/filebrowser/filebrowser/v2
|
module github.com/filebrowser/filebrowser/v2
|
||||||
|
|
||||||
go 1.18
|
go 1.20
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/asdine/storm/v3 v3.2.1
|
github.com/asdine/storm/v3 v3.2.1
|
||||||
@ -20,20 +20,18 @@ require (
|
|||||||
github.com/spf13/cobra v1.6.1
|
github.com/spf13/cobra v1.6.1
|
||||||
github.com/spf13/pflag v1.0.5
|
github.com/spf13/pflag v1.0.5
|
||||||
github.com/spf13/viper v1.15.0
|
github.com/spf13/viper v1.15.0
|
||||||
github.com/stretchr/testify v1.8.2
|
github.com/stretchr/testify v1.8.4
|
||||||
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
|
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
|
||||||
github.com/tus/tusd v1.11.0
|
|
||||||
go.etcd.io/bbolt v1.3.7
|
go.etcd.io/bbolt v1.3.7
|
||||||
golang.org/x/crypto v0.6.0
|
golang.org/x/crypto v0.10.0
|
||||||
golang.org/x/image v0.5.0
|
golang.org/x/image v0.5.0
|
||||||
golang.org/x/text v0.8.0
|
golang.org/x/text v0.10.0
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||||
gopkg.in/yaml.v2 v2.4.0
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/andybalholm/brotli v1.0.4 // indirect
|
github.com/andybalholm/brotli v1.0.4 // indirect
|
||||||
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 // indirect
|
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
||||||
github.com/dsoprea/go-logging v0.0.0-20200517223158-a10564966e9d // indirect
|
github.com/dsoprea/go-logging v0.0.0-20200517223158-a10564966e9d // indirect
|
||||||
@ -42,11 +40,13 @@ require (
|
|||||||
github.com/go-errors/errors v1.1.1 // indirect
|
github.com/go-errors/errors v1.1.1 // indirect
|
||||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||||
github.com/golang/geo v0.0.0-20200319012246-673a6f80352d // indirect
|
github.com/golang/geo v0.0.0-20200319012246-673a6f80352d // indirect
|
||||||
|
github.com/golang/protobuf v1.5.3 // indirect
|
||||||
github.com/golang/snappy v0.0.4 // indirect
|
github.com/golang/snappy v0.0.4 // indirect
|
||||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||||
github.com/inconshreveable/mousetrap v1.0.1 // indirect
|
github.com/inconshreveable/mousetrap v1.0.1 // indirect
|
||||||
github.com/klauspost/compress v1.15.9 // indirect
|
github.com/klauspost/compress v1.15.9 // indirect
|
||||||
github.com/klauspost/pgzip v1.2.5 // indirect
|
github.com/klauspost/pgzip v1.2.5 // indirect
|
||||||
|
github.com/kr/pretty v0.3.1 // indirect
|
||||||
github.com/magiconair/properties v1.8.7 // indirect
|
github.com/magiconair/properties v1.8.7 // indirect
|
||||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||||
github.com/nwaples/rardecode v1.1.0 // indirect
|
github.com/nwaples/rardecode v1.1.0 // indirect
|
||||||
@ -59,8 +59,10 @@ require (
|
|||||||
github.com/ulikunitz/xz v0.5.9 // indirect
|
github.com/ulikunitz/xz v0.5.9 // indirect
|
||||||
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
|
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
|
||||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||||
golang.org/x/net v0.8.0 // indirect
|
golang.org/x/net v0.11.0 // indirect
|
||||||
golang.org/x/sys v0.6.0 // indirect
|
golang.org/x/sys v0.9.0 // indirect
|
||||||
|
google.golang.org/protobuf v1.31.0 // indirect
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
@ -65,12 +65,9 @@ func NewHandler(
|
|||||||
api.PathPrefix("/resources").Handler(monkey(resourcePutHandler, "/api/resources")).Methods("PUT")
|
api.PathPrefix("/resources").Handler(monkey(resourcePutHandler, "/api/resources")).Methods("PUT")
|
||||||
api.PathPrefix("/resources").Handler(monkey(resourcePatchHandler(fileCache), "/api/resources")).Methods("PATCH")
|
api.PathPrefix("/resources").Handler(monkey(resourcePatchHandler(fileCache), "/api/resources")).Methods("PATCH")
|
||||||
|
|
||||||
const tusPath = "/tus"
|
api.PathPrefix("/tus").Handler(monkey(tusPostHandler(), "/api/tus")).Methods("POST")
|
||||||
tusHandler, err := NewTusHandler(store, server, "/api"+tusPath)
|
api.PathPrefix("/tus").Handler(monkey(tusHeadHandler(), "/api/tus")).Methods("HEAD")
|
||||||
if err != nil {
|
api.PathPrefix("/tus").Handler(monkey(tusPatchHandler(), "/api/tus")).Methods("PATCH")
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
api.PathPrefix(tusPath).Handler(tusHandler)
|
|
||||||
|
|
||||||
api.PathPrefix("/usage").Handler(monkey(diskUsage, "/api/usage")).Methods("GET")
|
api.PathPrefix("/usage").Handler(monkey(diskUsage, "/api/usage")).Methods("GET")
|
||||||
|
|
||||||
|
|||||||
122
http/tus.go
122
http/tus.go
@ -1,122 +0,0 @@
|
|||||||
package http
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
tusd "github.com/tus/tusd/pkg/handler"
|
|
||||||
|
|
||||||
"github.com/filebrowser/filebrowser/v2/settings"
|
|
||||||
"github.com/filebrowser/filebrowser/v2/storage"
|
|
||||||
)
|
|
||||||
|
|
||||||
type TusHandler struct {
|
|
||||||
store *storage.Storage
|
|
||||||
server *settings.Server
|
|
||||||
settings *settings.Settings
|
|
||||||
tusdHandlers map[uint]*tusd.UnroutedHandler
|
|
||||||
apiPath string
|
|
||||||
mutex *sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewTusHandler(store *storage.Storage, server *settings.Server, apiPath string) (_ *TusHandler, err error) {
|
|
||||||
tusHandler := &TusHandler{}
|
|
||||||
tusHandler.store = store
|
|
||||||
tusHandler.server = server
|
|
||||||
tusHandler.tusdHandlers = make(map[uint]*tusd.UnroutedHandler)
|
|
||||||
tusHandler.apiPath = apiPath
|
|
||||||
tusHandler.mutex = &sync.Mutex{}
|
|
||||||
|
|
||||||
if tusHandler.settings, err = store.Settings.Get(); err != nil {
|
|
||||||
return tusHandler, fmt.Errorf("couldn't get settings: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return tusHandler, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (th *TusHandler) getOrCreateTusdHandler(d *data, r *http.Request) (_ *tusd.UnroutedHandler, err error) {
|
|
||||||
// Use a mutex to make sure only one tus handler is created for each user
|
|
||||||
th.mutex.Lock()
|
|
||||||
defer th.mutex.Unlock()
|
|
||||||
|
|
||||||
tusdHandler, ok := th.tusdHandlers[d.user.ID]
|
|
||||||
if !ok {
|
|
||||||
// If we don't define an absolute URL for tusd, it creates an absolute URL for us that the client will use.
|
|
||||||
// See tusd/handler/unrouted_handler.go/absFileURL() for details.
|
|
||||||
// This URL's scheme will be http in our case (as we don't use tusd's inbuilt TLS feature),
|
|
||||||
// which is fine if we don't use both a browser and a reverse proxy that terminates SSL for us.
|
|
||||||
// In case we do, we need to define an absolute URL with the correct scheme, or we'll get mixed content errors.
|
|
||||||
// We can extract the correct scheme and host from the origin request header, if it exists (which always is the case for browsers).
|
|
||||||
var origin string
|
|
||||||
if originHeader, ok := r.Header["Origin"]; ok && len(originHeader) > 0 {
|
|
||||||
origin = originHeader[0]
|
|
||||||
}
|
|
||||||
basePath, err := url.JoinPath(origin, th.server.BaseURL, th.apiPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("Creating tus handler for user %s on path %s\n", d.user.Username, basePath)
|
|
||||||
tusdHandler, err = th.createTusdHandler(d, basePath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
th.tusdHandlers[d.user.ID] = tusdHandler
|
|
||||||
}
|
|
||||||
|
|
||||||
return tusdHandler, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (th TusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|
||||||
code, err := withUser(func(w http.ResponseWriter, r *http.Request, d *data) (int, error) {
|
|
||||||
// Create a new tus handler for current user if it doesn't exist yet
|
|
||||||
tusdHandler, err := th.getOrCreateTusdHandler(d, r)
|
|
||||||
if err != nil {
|
|
||||||
return http.StatusBadRequest, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch r.Method {
|
|
||||||
case "POST":
|
|
||||||
tusdHandler.PostFile(w, r)
|
|
||||||
case "HEAD":
|
|
||||||
tusdHandler.HeadFile(w, r)
|
|
||||||
case "PATCH":
|
|
||||||
tusdHandler.PatchFile(w, r)
|
|
||||||
default:
|
|
||||||
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Isn't used
|
|
||||||
return http.StatusNoContent, nil
|
|
||||||
})(w, r, &data{
|
|
||||||
store: th.store,
|
|
||||||
settings: th.settings,
|
|
||||||
server: th.server,
|
|
||||||
})
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case err != nil:
|
|
||||||
http.Error(w, err.Error(), code)
|
|
||||||
case code >= http.StatusBadRequest:
|
|
||||||
http.Error(w, "", code)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (th TusHandler) createTusdHandler(d *data, basePath string) (*tusd.UnroutedHandler, error) {
|
|
||||||
tusStore := NewInPlaceDataStore(d.user.FullPath("/"), d.user.Perm.Create, d.user.Perm.Modify)
|
|
||||||
composer := tusd.NewStoreComposer()
|
|
||||||
tusStore.UseIn(composer)
|
|
||||||
|
|
||||||
tusdHandler, err := tusd.NewUnroutedHandler(tusd.Config{
|
|
||||||
BasePath: basePath,
|
|
||||||
StoreComposer: composer,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to create tusdHandler: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return tusdHandler, nil
|
|
||||||
}
|
|
||||||
153
http/tus_handlers.go
Normal file
153
http/tus_handlers.go
Normal file
@ -0,0 +1,153 @@
|
|||||||
|
package http
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"github.com/filebrowser/filebrowser/v2/files"
|
||||||
|
"github.com/spf13/afero"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
func tusPostHandler() handleFunc {
|
||||||
|
return withUser(func(w http.ResponseWriter, r *http.Request, d *data) (int, error) {
|
||||||
|
file, err := files.NewFileInfo(files.FileOptions{
|
||||||
|
Fs: d.user.Fs,
|
||||||
|
Path: r.URL.Path,
|
||||||
|
Modify: d.user.Perm.Modify,
|
||||||
|
Expand: false,
|
||||||
|
ReadHeader: d.server.TypeDetectionByHeader,
|
||||||
|
Checker: d,
|
||||||
|
})
|
||||||
|
switch {
|
||||||
|
case errors.Is(err, afero.ErrFileNotFound):
|
||||||
|
if !d.user.Perm.Create || !d.Check(r.URL.Path) {
|
||||||
|
return http.StatusForbidden, nil
|
||||||
|
}
|
||||||
|
case err != nil:
|
||||||
|
return errToStatus(err), err
|
||||||
|
}
|
||||||
|
|
||||||
|
fileFlags := os.O_CREATE
|
||||||
|
if r.URL.Query().Get("override") == "true" {
|
||||||
|
fileFlags |= os.O_TRUNC
|
||||||
|
}
|
||||||
|
|
||||||
|
// if file exists
|
||||||
|
if file != nil {
|
||||||
|
if file.IsDir {
|
||||||
|
return http.StatusBadRequest, fmt.Errorf("cannot upload to a directory %s", file.RealPath())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
openFile, err := d.user.Fs.OpenFile(r.URL.Path, fileFlags, 0664)
|
||||||
|
if err != nil {
|
||||||
|
return errToStatus(err), err
|
||||||
|
}
|
||||||
|
if err := openFile.Close(); err != nil {
|
||||||
|
return errToStatus(err), err
|
||||||
|
}
|
||||||
|
|
||||||
|
return http.StatusCreated, nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func tusHeadHandler() handleFunc {
|
||||||
|
return withUser(func(w http.ResponseWriter, r *http.Request, d *data) (int, error) {
|
||||||
|
w.Header().Set("Cache-Control", "no-store")
|
||||||
|
if !d.Check(r.URL.Path) {
|
||||||
|
return http.StatusForbidden, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
file, err := files.NewFileInfo(files.FileOptions{
|
||||||
|
Fs: d.user.Fs,
|
||||||
|
Path: r.URL.Path,
|
||||||
|
Modify: d.user.Perm.Modify,
|
||||||
|
Expand: false,
|
||||||
|
ReadHeader: d.server.TypeDetectionByHeader,
|
||||||
|
Checker: d,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return errToStatus(err), err
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Upload-Offset", strconv.FormatInt(file.Size, 10))
|
||||||
|
w.Header().Set("Upload-Length", "-1")
|
||||||
|
|
||||||
|
return http.StatusOK, nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func tusPatchHandler() handleFunc {
|
||||||
|
return withUser(func(w http.ResponseWriter, r *http.Request, d *data) (int, error) {
|
||||||
|
if !d.user.Perm.Modify || !d.Check(r.URL.Path) {
|
||||||
|
return http.StatusForbidden, nil
|
||||||
|
}
|
||||||
|
if r.Header.Get("Content-Type") != "application/offset+octet-stream" {
|
||||||
|
return http.StatusUnsupportedMediaType, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
uploadOffset, err := getUploadOffset(r)
|
||||||
|
if err != nil {
|
||||||
|
return http.StatusBadRequest, fmt.Errorf("invalid upload offset: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
file, err := files.NewFileInfo(files.FileOptions{
|
||||||
|
Fs: d.user.Fs,
|
||||||
|
Path: r.URL.Path,
|
||||||
|
Modify: d.user.Perm.Modify,
|
||||||
|
Expand: false,
|
||||||
|
ReadHeader: d.server.TypeDetectionByHeader,
|
||||||
|
Checker: d,
|
||||||
|
})
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case errors.Is(err, afero.ErrFileNotFound):
|
||||||
|
return http.StatusNotFound, nil
|
||||||
|
case err != nil:
|
||||||
|
return errToStatus(err), err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case file.IsDir:
|
||||||
|
return http.StatusBadRequest, fmt.Errorf("cannot upload to a directory %s", file.RealPath())
|
||||||
|
case file.Size != uploadOffset:
|
||||||
|
return http.StatusConflict, fmt.Errorf(
|
||||||
|
"%s file size doesn't match the provided offset: %d",
|
||||||
|
file.RealPath(),
|
||||||
|
uploadOffset,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
openFile, err := d.user.Fs.OpenFile(r.URL.Path, os.O_WRONLY|os.O_APPEND, 0664)
|
||||||
|
if err != nil {
|
||||||
|
return http.StatusInternalServerError, fmt.Errorf("could not open file: %v", err)
|
||||||
|
}
|
||||||
|
defer openFile.Close()
|
||||||
|
|
||||||
|
_, err = openFile.Seek(uploadOffset, 0)
|
||||||
|
if err != nil {
|
||||||
|
return http.StatusInternalServerError, fmt.Errorf("could not seek file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
defer r.Body.Close()
|
||||||
|
bytesWritten, err := io.Copy(openFile, r.Body)
|
||||||
|
if err != nil {
|
||||||
|
return http.StatusInternalServerError, fmt.Errorf("could not write to file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Upload-Offset", strconv.FormatInt(uploadOffset+bytesWritten, 10))
|
||||||
|
|
||||||
|
return http.StatusNoContent, nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func getUploadOffset(r *http.Request) (int64, error) {
|
||||||
|
uploadOffset, err := strconv.ParseInt(r.Header.Get("Upload-Offset"), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("invalid upload offset: %v", err)
|
||||||
|
}
|
||||||
|
return uploadOffset, nil
|
||||||
|
}
|
||||||
@ -1,279 +0,0 @@
|
|||||||
// InPlaceDataStore is a storage backend for tusd, which stores the uploaded
|
|
||||||
// files in the user's root directory, without creating any auxiliary files.
|
|
||||||
// It thus requires no clean-up on failed uploads.
|
|
||||||
// The destination metadata field needs to be set in the upload request.
|
|
||||||
// For each NewUpload, the target file is expanded by the upload's size.
|
|
||||||
// This way, multiple uploads can work on the same file, without interfering
|
|
||||||
// with each other.
|
|
||||||
// The uploads are resumable. Also, parallel uploads are supported, however,
|
|
||||||
// the initial POST requests to NewUpload must be synchronized and in order.
|
|
||||||
// Otherwise, no guarantee of the upload's integrity can be given.
|
|
||||||
|
|
||||||
package http
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/rand"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
tusd "github.com/tus/tusd/pkg/handler"
|
|
||||||
)
|
|
||||||
|
|
||||||
const uidLength = 16
|
|
||||||
const filePerm = 0644
|
|
||||||
|
|
||||||
// InPlaceDataStore is a tusd storage backend that writes uploads
// directly into the destination file inside the user's root directory,
// creating no auxiliary files and therefore requiring no clean-up on
// failed uploads. Access to the two bookkeeping maps below is guarded
// by mutex.
type InPlaceDataStore struct {
	// All uploads will be stored relative to this directory.
	// It equals the user's root directory.
	path string

	// Store whether the user is permitted to create new files.
	createPerm bool

	// Store whether the user is permitted to modify files or only create new ones.
	modifyPerm bool

	// Maps an upload ID to its object.
	// Required, since GetUpload only provides us with the id of an upload
	// and expects us to return the Info object.
	uploadsByID map[string]*InPlaceUpload

	// Map all uploads by their path.
	// Each path can have multiple uploads, as multiple uploads can work on the same file
	// when parallel uploads are enabled.
	uploadsByPath map[string][]*InPlaceUpload

	// Each upload appends to the file, so we need to make sure
	// each upload has expanded the file by info.Size bytes, before the next
	// upload is created. The mutex serializes that file expansion and
	// the map updates above.
	mutex *sync.Mutex
}
|
|
||||||
|
|
||||||
func NewInPlaceDataStore(path string, createPerm, modifyPerm bool) *InPlaceDataStore {
|
|
||||||
return &InPlaceDataStore{
|
|
||||||
path: path,
|
|
||||||
createPerm: createPerm,
|
|
||||||
modifyPerm: modifyPerm,
|
|
||||||
uploadsByID: make(map[string]*InPlaceUpload),
|
|
||||||
uploadsByPath: make(map[string][]*InPlaceUpload),
|
|
||||||
mutex: &sync.Mutex{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UseIn registers the store with a tusd.StoreComposer as both the core
// storage backend and the concater (AsConcatableUpload below exists
// because client libraries request a final concatenation for parallel
// uploads).
func (store *InPlaceDataStore) UseIn(composer *tusd.StoreComposer) {
	composer.UseCore(store)
	composer.UseConcater(store)
}
|
|
||||||
|
|
||||||
func (store *InPlaceDataStore) isPartOfNewUpload(fileExists bool, filePath string) bool {
|
|
||||||
if !fileExists {
|
|
||||||
// If the file doesn't exist, remove all upload references.
|
|
||||||
// This way we can eliminate inconsistencies for failed uploads.
|
|
||||||
for _, upload := range store.uploadsByPath[filePath] {
|
|
||||||
delete(store.uploadsByID, upload.ID)
|
|
||||||
}
|
|
||||||
delete(store.uploadsByPath, filePath)
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// In case the file exists, it is still possible that it is a new upload.
|
|
||||||
// E.g.: the user wants to overwrite an existing file.
|
|
||||||
return store.uploadsByPath[filePath] == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (store *InPlaceDataStore) checkPermissions(isPartOfNewUpload bool) error {
|
|
||||||
// Return tusd.HTTPErrors, as they are handled by tusd.
|
|
||||||
if isPartOfNewUpload {
|
|
||||||
if !store.createPerm {
|
|
||||||
return tusd.NewHTTPError(errors.New("user is not allowed to create a new upload"), http.StatusForbidden)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !store.modifyPerm {
|
|
||||||
return tusd.NewHTTPError(errors.New("user is not allowed to modify existing files"), http.StatusForbidden)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (store *InPlaceDataStore) initializeUpload(filePath string, info *tusd.FileInfo) (int64, error) {
|
|
||||||
fileExists := true
|
|
||||||
if _, err := os.Stat(filePath); os.IsNotExist(err) {
|
|
||||||
fileExists = false
|
|
||||||
} else if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete existing files and references, if necessary.
|
|
||||||
isPartOfNewUpload := store.isPartOfNewUpload(fileExists, filePath)
|
|
||||||
|
|
||||||
if err := store.checkPermissions(isPartOfNewUpload); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
if isPartOfNewUpload && fileExists {
|
|
||||||
// Remove the file's contents (instead of re-creating it).
|
|
||||||
return 0, os.Truncate(filePath, 0)
|
|
||||||
}
|
|
||||||
if isPartOfNewUpload && !fileExists {
|
|
||||||
// Create the file, if it doesn't exist.
|
|
||||||
if _, err := os.Create(filePath); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// The file exists and is part of an existing upload.
|
|
||||||
// Open the file and enlarge it by the upload's size.
|
|
||||||
file, err := os.OpenFile(filePath, os.O_WRONLY, filePerm)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
// Get the file's current size and offset to the end of the file.
|
|
||||||
actualOffset, err := file.Seek(0, io.SeekEnd)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
// Enlarge the file by the upload's size (starting from the current offset).
|
|
||||||
if _, err = file.Write(make([]byte, info.Size)); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return actualOffset, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewUpload creates (or joins) an upload described by info and registers
// it in both bookkeeping maps. The "destination" metadata field is
// mandatory and names the target path relative to the user's root.
func (store *InPlaceDataStore) NewUpload(ctx context.Context, info tusd.FileInfo) (_ tusd.Upload, err error) { //nolint: gocritic
	// The method must return an unique id which is used to identify the upload
	if info.ID, err = uid(); err != nil {
		return nil, err
	}

	destination, ok := info.MetaData["destination"]
	if !ok {
		return nil, errors.New("metadata field 'destination' not found in upload request")
	}
	// NOTE(review): destination comes from client metadata; filepath.Join
	// cleans the result, but a leading ".." could still escape store.path
	// — confirm the caller validates/strips the path upstream.
	filePath := filepath.Join(store.path, destination)

	// actualOffset is provisionally set to info.Size and corrected by
	// initializeUpload below for non-final uploads.
	upload := &InPlaceUpload{
		FileInfo:     info,
		filePath:     filePath,
		actualOffset: info.Size,
		parent:       store,
	}

	// Lock the mutex, as we need to modify the target file synchronously.
	store.mutex.Lock()
	defer store.mutex.Unlock()

	// Tus creates a POST request for the final concatenation.
	// In that case, we don't need to create a new upload.
	if !info.IsFinal {
		if upload.actualOffset, err = store.initializeUpload(filePath, &info); err != nil {
			return nil, err
		}
	}
	store.uploadsByID[upload.ID] = upload
	store.uploadsByPath[upload.filePath] = append(store.uploadsByPath[upload.filePath], upload)

	return upload, nil
}
|
|
||||||
|
|
||||||
func (store *InPlaceDataStore) GetUpload(ctx context.Context, id string) (tusd.Upload, error) {
|
|
||||||
if upload, ok := store.uploadsByID[id]; ok {
|
|
||||||
return upload, nil
|
|
||||||
} else {
|
|
||||||
return nil, errors.New("upload not found")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We need to define a concater, as client libraries will automatically ask for a concatenation.
// AsConcatableUpload satisfies tusd's ConcaterDataStore interface by
// asserting the generic upload back to our concrete type.
func (store *InPlaceDataStore) AsConcatableUpload(upload tusd.Upload) tusd.ConcatableUpload {
	return upload.(*InPlaceUpload)
}
|
|
||||||
|
|
||||||
// InPlaceUpload is a single (possibly partial) upload writing into one
// region of the destination file.
type InPlaceUpload struct {
	tusd.FileInfo
	// Extend the tusd.FileInfo struct with the target path of our uploaded file.
	filePath string
	// tusd expects offset to equal the upload's written bytes.
	// As we can have multiple uploads working on the same file,
	// this is not the case for us. Thus, store the actual offset
	// (the position in the file where this upload's region begins).
	// See: https://github.com/tus/tusd/blob/main/pkg/handler/unrouted_handler.go#L714
	actualOffset int64
	// Enable the upload to remove itself from the active uploads map.
	parent *InPlaceDataStore
}
|
|
||||||
|
|
||||||
func (upload *InPlaceUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
|
|
||||||
// Open the file and seek to the given offset.
|
|
||||||
// Then, copy the given reader to the file, update the offset and return.
|
|
||||||
file, err := os.OpenFile(upload.filePath, os.O_WRONLY, filePerm)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
if _, err = file.Seek(upload.actualOffset+offset, io.SeekStart); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err := io.Copy(file, src)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
upload.Offset += n
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetInfo reports this upload's state to tusd. Offset within the
// embedded FileInfo is advanced by WriteChunk, so the snapshot reflects
// the bytes written by this upload so far.
func (upload *InPlaceUpload) GetInfo(ctx context.Context) (tusd.FileInfo, error) {
	return upload.FileInfo, nil
}
|
|
||||||
|
|
||||||
// GetReader opens the destination file for reading. Note it exposes the
// whole target file, not just this upload's own region.
func (upload *InPlaceUpload) GetReader(ctx context.Context) (io.Reader, error) {
	return os.Open(upload.filePath)
}
|
|
||||||
|
|
||||||
func (upload *InPlaceUpload) FinishUpload(ctx context.Context) error {
|
|
||||||
upload.parent.mutex.Lock()
|
|
||||||
defer upload.parent.mutex.Unlock()
|
|
||||||
|
|
||||||
delete(upload.parent.uploadsByID, upload.ID)
|
|
||||||
uploadsByPath := upload.parent.uploadsByPath[upload.filePath]
|
|
||||||
for i, u := range uploadsByPath {
|
|
||||||
if u.ID == upload.ID {
|
|
||||||
upload.parent.uploadsByPath[upload.filePath] = append(uploadsByPath[:i], uploadsByPath[i+1:]...)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(upload.parent.uploadsByPath[upload.filePath]) == 0 {
|
|
||||||
delete(upload.parent.uploadsByPath, upload.filePath)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (upload *InPlaceUpload) ConcatUploads(ctx context.Context, uploads []tusd.Upload) (err error) {
|
|
||||||
for _, u := range uploads {
|
|
||||||
if err := (u.(*InPlaceUpload)).FinishUpload(ctx); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return upload.FinishUpload(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
func uid() (string, error) {
|
|
||||||
id := make([]byte, uidLength)
|
|
||||||
if _, err := io.ReadFull(rand.Reader, id); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return hex.EncodeToString(id), nil
|
|
||||||
}
|
|
||||||
@ -32,9 +32,9 @@ func errToStatus(err error) int {
|
|||||||
return http.StatusOK
|
return http.StatusOK
|
||||||
case os.IsPermission(err):
|
case os.IsPermission(err):
|
||||||
return http.StatusForbidden
|
return http.StatusForbidden
|
||||||
case os.IsNotExist(err), err == libErrors.ErrNotExist:
|
case os.IsNotExist(err), errors.Is(err, libErrors.ErrNotExist):
|
||||||
return http.StatusNotFound
|
return http.StatusNotFound
|
||||||
case os.IsExist(err), err == libErrors.ErrExist:
|
case os.IsExist(err), errors.Is(err, libErrors.ErrExist):
|
||||||
return http.StatusConflict
|
return http.StatusConflict
|
||||||
case errors.Is(err, libErrors.ErrPermissionDenied):
|
case errors.Is(err, libErrors.ErrPermissionDenied):
|
||||||
return http.StatusForbidden
|
return http.StatusForbidden
|
||||||
|
|||||||
@ -35,7 +35,6 @@ func (s *Storage) Get() (*Settings, error) {
|
|||||||
}
|
}
|
||||||
if set.Tus == (Tus{}) {
|
if set.Tus == (Tus{}) {
|
||||||
set.Tus = Tus{
|
set.Tus = Tus{
|
||||||
Enabled: false,
|
|
||||||
ChunkSize: DefaultTusChunkSize,
|
ChunkSize: DefaultTusChunkSize,
|
||||||
RetryCount: DefaultTusRetryCount,
|
RetryCount: DefaultTusRetryCount,
|
||||||
}
|
}
|
||||||
|
|||||||
@ -1,11 +1,10 @@
|
|||||||
package settings
|
package settings
|
||||||
|
|
||||||
const DefaultTusChunkSize = 20 * 1024 * 1024 // 20MB
|
const DefaultTusChunkSize = 10 * 1024 * 1024 // 10MB
|
||||||
const DefaultTusRetryCount = 3
|
const DefaultTusRetryCount = 5
|
||||||
|
|
||||||
// Tus contains the tus.io settings of the app.
|
// Tus contains the tus.io settings of the app.
|
||||||
type Tus struct {
|
type Tus struct {
|
||||||
Enabled bool `json:"enabled"`
|
|
||||||
ChunkSize uint64 `json:"chunkSize"`
|
ChunkSize uint64 `json:"chunkSize"`
|
||||||
RetryCount uint16 `json:"retryCount"`
|
RetryCount uint16 `json:"retryCount"`
|
||||||
}
|
}
|
||||||
|
|||||||
Loading…
Reference in New Issue
Block a user