This commit is contained in:
Paul Zinselmeyer 2024-04-11 21:13:33 +02:00
commit 783014b369
Signed by: pfzetto
GPG key ID: B471A1AF06C895FD
22 changed files with 5938 additions and 0 deletions

6
.env Normal file
View file

@ -0,0 +1,6 @@
APPLICATION_BASE=https://oxidev.pfzetto.de/
ISSUER=https://auth.zettoit.eu/realms/zettoit
CLIENT_ID=oxicloud
CLIENT_SECRET=IvBcDOfp9WBfGNmwIbiv67bxCwuQUGbl
SCOPES=
DATABASE_URL=mysql://root:start1234@127.0.0.1/oxicloud

3
.gitignore vendored Normal file
View file

@ -0,0 +1,3 @@
/target
/data
.env

3308
Cargo.lock generated Normal file

File diff suppressed because it is too large Load diff

41
Cargo.toml Normal file
View file

@ -0,0 +1,41 @@
[package]
name = "oxicloud"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# Configuration & logging
dotenvy = "0.15.7"
log = "0.4.20"
env_logger = "0.10.1"
thiserror = "1.0.51"
# Async runtime & HTTP stack
tokio = { version = "1.35.1", features = ["full"] }
axum = { version = "0.7.2", features = [ "macros" ] }
axum-oidc = "0.2.1"
# (De)serialization
serde = { version = "1.0.193", features = [ "derive" ] }
serde-xml-rs = "0.6.0"
serde_json = "1.0.108"
# WebDAV protocol implementation (patched fork)
webdav-handler = { git="https://github.com/pfzetto/webdav-handler-rs" }
tower = "0.4.13"
tower-http = { version = "0.5.0", features = [ "trace" ] }
tower-sessions = "0.7.0"
# Database access (MySQL) — query! macros need DATABASE_URL at build time
sqlx = { version="0.7.3", features=["runtime-tokio", "mysql", "time"] }
# Hashing (app-password storage, etags) & token generation
sha3 = "0.10.8"
md5 = "0.7.0"
rand = "0.8.5"
base64 = "0.21.5"
bytes = "1.5.0"
futures = "0.3.29"
# XML handling for PROPFIND responses
xmltree = "0.10.3"
xml = "0.8.10"
time = "0.3.31"

21
README.md Normal file
View file

@ -0,0 +1,21 @@
Oxicloud is a file server that aims to be a minimal implementation of a Nextcloud-compatible client API.
# Disclaimer
Please report any bugs you find while using Oxicloud to this project.
Do not report issues to the Nextcloud clients.
# Features
## General
- [x] File browsing
- [x] Folder creation
- [x] Small file upload
- [ ] Large file upload
- [x] File deletion
## Sharing
- [ ] User shares
- [ ] Link sharing
## Preview
- [ ] Images
- [ ] Documents

12
TODO.md Normal file
View file

@ -0,0 +1,12 @@
- Make the XML permissions field dynamic
# XML `oc:permissions`
|Permission|Meaning|
|---|---|
|`CK`|Can Write|
|`S`| Shared with me|
|`R`|Can Reshare|
|`M`|Groupfolder|

106
flake.lock Normal file
View file

@ -0,0 +1,106 @@
{
"nodes": {
"crane": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1703089493,
"narHash": "sha256-WUjYqUP/Lhhop9+aiHVFREgElunx1AHEWxqMT8ePfzo=",
"owner": "ipetkov",
"repo": "crane",
"rev": "2a5136f14a9ac93d9d370d64a36026c5de3ae8a4",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1701680307,
"narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "4022d587cbbfd70fe950c1e2083a02621806a725",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1703013332,
"narHash": "sha256-+tFNwMvlXLbJZXiMHqYq77z/RfmpfpiI3yjL6o/Zo9M=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "54aac082a4d9bb5bbc5c4e899603abfb76a3f6d6",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"crane": "crane",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay"
}
},
"rust-overlay": {
"inputs": {
"flake-utils": [
"flake-utils"
],
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1703124916,
"narHash": "sha256-LNAqNYcJf0iCm6jbzhzsQOC4F8SLyma5sckySn2Iffg=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "81cb529bd066cd3668f9aa88d2afa8fbbbcd1208",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

75
flake.nix Normal file
View file

@ -0,0 +1,75 @@
{
  # Nix flake for the oxicloud service: crane-based package build plus a dev shell.
  description = "oxicloud service";
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    flake-utils.url = "github:numtide/flake-utils";
    rust-overlay = {
      url = "github:oxalica/rust-overlay";
      inputs = {
        nixpkgs.follows = "nixpkgs";
        flake-utils.follows = "flake-utils";
      };
    };
    crane = {
      url = "github:ipetkov/crane";
      inputs = {
        nixpkgs.follows = "nixpkgs";
      };
    };
  };
  outputs = { self, nixpkgs, flake-utils, rust-overlay, crane}: let
    # Evaluate `function` once per supported system.
    forAllSystems = function:
      nixpkgs.lib.genAttrs [
        "x86_64-linux"
        "aarch64-linux"
      ] (system: function system nixpkgs.legacyPackages.${system});
  in rec {
    packages = forAllSystems(system: syspkgs: let
      pkgs = import nixpkgs {
        inherit system;
        overlays = [ (import rust-overlay) ];
      };
      # Nightly toolchain: src/main.rs uses #![feature(btree_cursors)].
      rustToolchain = pkgs.rust-bin.nightly.latest.default;
      craneLib = (crane.mkLib pkgs).overrideToolchain rustToolchain;
      # Keep *.stpl templates and the static/ assets alongside cargo sources.
      src = pkgs.lib.cleanSourceWith {
        src = craneLib.path ./.;
        filter = path: type:
          (pkgs.lib.hasSuffix "\.stpl" path) ||
          (pkgs.lib.hasInfix "static" path) ||
          (craneLib.filterCargoSources path type)
        ;
      };
      nativeBuildInputs = with pkgs; [ rustToolchain pkg-config ];
      buildInputs = with pkgs; [ ];
      commonArgs = {
        inherit src buildInputs nativeBuildInputs;
      };
      # Build dependency crates separately so they are cached across source changes.
      cargoArtifacts = craneLib.buildDepsOnly commonArgs;
      bin = craneLib.buildPackage (commonArgs // {
        inherit cargoArtifacts;
        pname = "oxicloud";
        installPhaseCommand = ''
          mkdir -p $out/bin
          cp target/release/oxicloud $out/bin/oxicloud
          cp -r static $out/static
        '';
      });
    in {
      inherit bin;
      default = bin;
    });
    devShells = forAllSystems(system: pkgs: {
      default = pkgs.mkShell {
        packages = with pkgs; [ sqlx-cli cargo-watch mysql-client ];
        inputsFrom = [ packages.${system}.bin ];
      };
    });
    hydraJobs."bin" = forAllSystems(system: pkgs: packages.${system}.bin);
  };
}

View file

@ -0,0 +1,20 @@
-- Add migration script here
-- Local user accounts, mapped 1:1 to an upstream OIDC subject.
CREATE TABLE users (
    id INT UNSIGNED NOT NULL AUTO_INCREMENT,
    -- `sub` claim from the OIDC identity provider.
    oidc_id VARCHAR(48) NOT NULL,
    name VARCHAR(128) NOT NULL,
    PRIMARY KEY (id),
    UNIQUE (oidc_id),
    -- Supports searching users by display name.
    FULLTEXT (name)
);
-- Per-device application passwords ("app tokens") minted during login flow.
CREATE TABLE user_tokens (
    id INT UNSIGNED NOT NULL AUTO_INCREMENT,
    user_id INT UNSIGNED NOT NULL,
    -- User-agent string captured when the token was created.
    name VARCHAR(255) NOT NULL,
    -- SHA3-256 hash of the token; the plaintext is never stored.
    hash BINARY(32) NOT NULL,
    PRIMARY KEY (id),
    -- Typo fix: constraint was previously named fk_users_app_passords.
    CONSTRAINT fk_users_app_passwords FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);

View file

@ -0,0 +1,19 @@
-- Add migration script here
-- Shares between two local users: a path in src_user's tree mounted at
-- dst_path inside dst_user's tree.
CREATE TABLE user_shares (
    id INT UNSIGNED NOT NULL AUTO_INCREMENT,
    created_by INT UNSIGNED NOT NULL,
    src_user INT UNSIGNED NOT NULL,
    -- Path relative to src_user's home directory.
    src_path TEXT NOT NULL,
    dst_user INT UNSIGNED NOT NULL,
    -- Mount point relative to dst_user's home directory.
    dst_path TEXT NOT NULL,
    -- NULL means the share never expires.
    expires_at DATETIME NULL,
    note TEXT NOT NULL,
    -- Bit flags; see TODO.md for the oc:permissions mapping.
    permissions TINYINT UNSIGNED NOT NULL,
    PRIMARY KEY (id),
    CONSTRAINT fk_user_shares_src FOREIGN KEY (src_user) REFERENCES users(id) ON DELETE CASCADE,
    CONSTRAINT fk_user_shares_dst FOREIGN KEY (dst_user) REFERENCES users(id) ON DELETE CASCADE,
    CONSTRAINT fk_user_shares_created_by FOREIGN KEY (created_by) REFERENCES users(id) ON DELETE CASCADE,
    FULLTEXT (src_path, dst_path)
);

751
src/dav_fs.rs Normal file
View file

@ -0,0 +1,751 @@
use std::{
collections::{BTreeMap, HashMap},
fs::Metadata,
future::ready,
io::SeekFrom,
path::{Path, PathBuf},
pin::Pin,
sync::Arc,
task::{Context, Poll},
time::{SystemTime, UNIX_EPOCH},
};
use axum::http::StatusCode;
use base64::{engine::general_purpose::STANDARD as base64std, Engine};
use bytes::{Buf, Bytes, BytesMut};
use futures::{future::BoxFuture, stream, Future, FutureExt, Stream, StreamExt, TryFutureExt};
use log::{error, info};
use sqlx::{query, MySqlPool};
use tokio::{
fs::{metadata, read_dir, File, ReadDir},
io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt},
sync::RwLock,
};
use webdav_handler::{
davpath::DavPath,
fs::{
DavDirEntry, DavFile, DavFileSystem, DavMetaData, DavProp, FsError, FsFuture, FsResult,
FsStream, OpenOptions, ReadDirMeta,
},
};
use xmltree::{Element, XMLNode};
use crate::{error::Error, fs::FsUser};
const DAV_OC_PROP: &str = "http://owncloud.org/ns";
const DAV_NC_PROP: &str = "http://nextcloud.org/ns";
/// Per-user WebDAV filesystem backed by the local disk plus DB-defined
/// incoming shares from other users.
#[derive(Clone)]
pub struct FilesFs {
    pub user: FsUser,
    // Pool used to look up `user_shares` rows.
    pub db: MySqlPool,
    // Lazily filled cache of incoming shares: dst path -> (owner, src path).
    pub incoming_share_cache: Arc<RwLock<BTreeMap<PathBuf, (FsUser, PathBuf)>>>,
}
impl DavFileSystem for FilesFs {
    /// Opens (or creates) a file after mapping the DAV path onto the
    /// backing filesystem, honouring incoming-share mount points.
    fn open<'a>(&'a self, path: &'a DavPath, options: OpenOptions) -> FsFuture<Box<dyn DavFile>> {
        info!("test: {}", path.to_string());
        Box::pin(async move {
            // `meta` (share/exists info) is unused here; only the resolved
            // on-disk path matters for opening.
            let (meta, path) = self
                .resolve_path(path)
                .map_err(|_| FsError::GeneralFailure)
                .await?;
            // Translate the generic DAV open flags onto tokio's OpenOptions.
            let mut tokio_options = tokio::fs::OpenOptions::new();
            tokio_options
                .read(options.read)
                .write(options.write)
                .append(options.append)
                .truncate(options.truncate)
                .create(options.create)
                .create_new(options.create_new);
            let file = OxiFile::new(path.clone(), &tokio_options).await?;
            let file: Box<dyn DavFile> = Box::new(file);
            Ok(file)
        })
    }
    /// Lists a directory, appending share mount points that live directly
    /// under the requested path to the real on-disk entries.
    fn read_dir<'a>(
        &'a self,
        ppath: &'a DavPath,
        meta: ReadDirMeta,
    ) -> FsFuture<FsStream<Box<dyn DavDirEntry>>> {
        info!("dir: {:?}", ppath.as_pathbuf());
        Box::pin(async move {
            //let (is_share, path) = self
            //    .resolve_path(ppath)
            //    .map_err(|_| FsError::GeneralFailure)
            //    .await?;
            //warn!("dirpath: {:?}", path);
            //if !self.can_access(&path) {
            //    return Err(FsError::Forbidden);
            //}
            let (meta, path) = self
                .resolve_path(ppath)
                .map_err(|_| FsError::GeneralFailure)
                .await?;
            // Box each concrete entry as a DavDirEntry trait object.
            let entry_stream = DirEntryStream::read_dir(&path).await.map(|x| {
                let y: Box<dyn DavDirEntry> = Box::new(x);
                y
            });
            // NOTE(review): `unwrap` panics on DB errors while listing shares.
            let entry_stream = entry_stream.chain(stream::iter(self.shares(&ppath).await.unwrap()));
            let entry_stream: FsStream<Box<dyn DavDirEntry>> = Box::pin(entry_stream);
            Ok(entry_stream)
        })
    }
    /// Returns metadata for `path` (length, dir flag, etag); 404 when the
    /// resolved on-disk path does not exist.
    fn metadata<'a>(&'a self, path: &'a DavPath) -> FsFuture<Box<dyn DavMetaData>> {
        Box::pin(async move {
            info!("metadata {:?}", path);
            let (meta, path) = self
                .resolve_path(path)
                .map_err(|_| FsError::GeneralFailure)
                .await?;
            if meta.exists {
                // Open read-only just to reuse OxiFile's metadata/etag logic.
                let file = OxiFile::new(path.clone(), tokio::fs::OpenOptions::new().read(true))
                    .await?
                    .metadata()
                    .await
                    .unwrap();
                Ok(file)
            } else {
                Err(FsError::NotFound)
            }
        })
    }
    /// Symlinks are not supported, so this is identical to `metadata`.
    fn symlink_metadata<'a>(&'a self, path: &'a DavPath) -> FsFuture<Box<dyn DavMetaData>> {
        // symlinks are currently not supported, so no difference to normal metadata
        self.metadata(path)
    }
    /// Creates a directory (and any missing parents); errors if the path
    /// already exists.
    fn create_dir<'a>(&'a self, path: &'a DavPath) -> FsFuture<()> {
        Box::pin(async move {
            info!("create dir");
            let (path_meta, path) = self
                .resolve_path(path)
                .map_err(|_| FsError::GeneralFailure)
                .await?;
            match path_meta.exists {
                false => {
                    //TODO remove shares in db with src_path=path
                    tokio::fs::create_dir_all(path)
                        .await
                        .map_err(|_| FsError::GeneralFailure)?;
                    Ok(())
                }
                true => Err(FsError::Exists),
            }
        })
    }
    /// Removes a directory. Removing a share mount point (which is a DB
    /// row, not a real directory) is not yet implemented.
    fn remove_dir<'a>(&'a self, path: &'a DavPath) -> FsFuture<()> {
        Box::pin(async move {
            info!("remove dir");
            let (path_meta, path) = self
                .resolve_path(path)
                .map_err(|_| FsError::GeneralFailure)
                .await?;
            match (path_meta.exists, path_meta.is_mount_point) {
                (true, false) => {
                    //TODO remove shares in db with src_path.starts_with(path)
                    tokio::fs::remove_dir(path)
                        .await
                        .map_err(|_| FsError::GeneralFailure)?;
                    Ok(())
                }
                (true, true) => Err(FsError::NotImplemented), //TODO remove share in db with dst_path =path
                _ => Err(FsError::NotFound),
            }
        })
    }
    /// Deletes a file. Deleting a file that is itself a share mount point
    /// is not yet implemented.
    fn remove_file<'a>(&'a self, path: &'a DavPath) -> FsFuture<()> {
        Box::pin(async move {
            info!("remove file");
            let (path_meta, path) = self
                .resolve_path(path)
                .map_err(|_| FsError::GeneralFailure)
                .await?;
            match (path_meta.exists, path_meta.is_mount_point) {
                (true, false) => {
                    //TODO remove shares in db with src_path=path
                    tokio::fs::remove_file(path)
                        .await
                        .map_err(|_| FsError::GeneralFailure)?;
                    Ok(())
                }
                (true, true) => Err(FsError::NotImplemented), //TODO remove share in db with dst_path =path
                _ => Err(FsError::NotFound),
            }
        })
    }
    /// Moves/renames a file or directory. Renaming a share mount point
    /// would require updating the `user_shares` row; not yet implemented.
    fn rename<'a>(&'a self, from: &'a DavPath, to: &'a DavPath) -> FsFuture<()> {
        Box::pin(async move {
            info!("rename");
            let (from_meta, from) = self
                .resolve_path(from)
                .map_err(|_| FsError::GeneralFailure)
                .await?;
            let (to_meta, to) = self
                .resolve_path(to)
                .map_err(|_| FsError::GeneralFailure)
                .await?;
            match (from_meta.exists, to_meta.exists) {
                (true, false) if !from_meta.is_mount_point => {
                    tokio::fs::rename(from, to)
                        .await
                        .map_err(|_| FsError::GeneralFailure)?;
                    Ok(())
                }
                (true, false) => Err(FsError::NotImplemented), //TODO rename dst in db
                (false, _) => Err(FsError::NotFound),
                (true, true) => Err(FsError::Exists),
            }
        })
    }
    /// Copies a file; refuses to overwrite an existing destination.
    /// NOTE(review): `tokio::fs::copy` handles regular files — directory
    /// sources presumably surface as GeneralFailure; confirm.
    fn copy<'a>(&'a self, from: &'a DavPath, to: &'a DavPath) -> FsFuture<()> {
        Box::pin(async move {
            info!("copy");
            let (from_meta, from) = self
                .resolve_path(from)
                .map_err(|_| FsError::GeneralFailure)
                .await?;
            let (to_meta, to) = self
                .resolve_path(to)
                .map_err(|_| FsError::GeneralFailure)
                .await?;
            match (from_meta.exists, to_meta.exists) {
                (true, false) => {
                    tokio::fs::copy(from, to)
                        .await
                        .map_err(|_| FsError::GeneralFailure)?;
                }
                (false, _) => return Err(FsError::NotFound),
                (true, true) => return Err(FsError::Exists),
            }
            Ok(())
        })
    }
    /// Claims property support for every path so clients issue PROPFIND
    /// requests that are answered via `get_prop`.
    fn have_props<'a>(
        &'a self,
        path: &'a DavPath,
    ) -> std::pin::Pin<Box<dyn Future<Output = bool> + Send + 'a>> {
        Box::pin(ready(true))
    }
    /// PROPPATCH is not supported; always reports NotImplemented.
    fn patch_props<'a>(
        &'a self,
        path: &'a DavPath,
        patch: Vec<(bool, DavProp)>,
    ) -> FsFuture<Vec<(StatusCode, DavProp)>> {
        Box::pin(async move {
            error!("NOT IMPLEMENTED patch_props");
            Err(FsError::NotImplemented)
        })
    }
    /// Bulk property listing is not supported; clients fall back to
    /// requesting individual properties via `get_prop`.
    fn get_props<'a>(&'a self, path: &'a DavPath, do_content: bool) -> FsFuture<Vec<DavProp>> {
        Box::pin(async move {
            error!("NOT IMPLEMENTED get_props");
            Err(FsError::NotImplemented)
        })
    }
fn get_prop<'a>(&'a self, ppath: &'a DavPath, prop: DavProp) -> FsFuture<Element> {
fn element(prefix: &str, name: &str, children: Vec<XMLNode>) -> Element {
Element {
prefix: Some(prefix.to_string()),
namespace: None,
namespaces: None,
name: name.to_string(),
attributes: HashMap::default(),
children,
}
}
Box::pin(async move {
let (meta, path) = self
.resolve_path(ppath)
.map_err(|_| FsError::GeneralFailure)
.await?;
//if !self.can_access(&path) {
// return Err(FsError::Forbidden);
//}
//
const NC_PREFIX: &str = "nc";
const OC_PREFIX: &str = "oc";
match (
prop.name.as_str(),
prop.namespace.as_deref().unwrap_or_default(),
) {
("permissions", DAV_OC_PROP) => {
let val = match meta.is_share {
true => "SRGDNVCK",
false => "RGDNVCK",
};
return Ok(element(
OC_PREFIX,
"permissions",
vec![XMLNode::Text(val.to_string())],
));
}
("rich-workspace", DAV_NC_PROP) if false => {
return Ok(element(
NC_PREFIX,
"rich-workspace",
vec![XMLNode::Text(
"# Hello World\nLorem Ipsum, si dolor amet.".to_string(),
)],
))
}
("has-preview", DAV_NC_PROP) => {
return Ok(element(
NC_PREFIX,
"has-preview",
vec![XMLNode::Text("false".to_string())],
))
}
("mount-type", DAV_NC_PROP) => {
let val = match meta.is_share {
true => "shared",
false => "",
};
return Ok(element(
NC_PREFIX,
"mount-type",
vec![XMLNode::Text(val.to_string())],
));
}
("sharees", DAV_NC_PROP) if false => {
return Ok(element(
NC_PREFIX,
"sharees",
vec![XMLNode::Element(element(
"sharee",
"nc",
vec![
XMLNode::Element(element(
"id",
"nc",
vec![XMLNode::Text("1".to_string())],
)),
XMLNode::Element(element(
"display-name",
"nc",
vec![XMLNode::Text("testuser".to_string())],
)),
XMLNode::Element(element(
"type",
"nc",
vec![XMLNode::Text("0".to_string())],
)),
],
))],
));
}
("is-encrypted", DAV_NC_PROP) => {
return Ok(element(
NC_PREFIX,
"is-encrypted",
vec![XMLNode::Text("false".to_string())],
))
}
_ => (),
}
Err(FsError::NotFound)
})
}
    /// Quota reporting is not implemented yet.
    fn get_quota(&self) -> FsFuture<(u64, Option<u64>)> {
        info!("quota");
        Box::pin(async move {
            error!("NOT IMPLEMENTED get_quota");
            Err(FsError::NotImplemented)
        })
    }
}
impl FilesFs {
    /// Lazily loads all incoming shares for this user into the share cache,
    /// keyed by the destination (mount) path.
    ///
    /// NOTE(review): the emptiness check and the write happen under separate
    /// lock acquisitions, so two concurrent callers may both fill the cache
    /// (harmless duplicates, but worth confirming).
    async fn fill_cache(&self) -> Result<(), Error> {
        if self.incoming_share_cache.read().await.is_empty() {
            let res = query!(
                "SELECT src_user, src_path, dst_path FROM user_shares WHERE dst_user = ?",
                self.user.0
            )
            .fetch_all(&self.db)
            .await?;
            let mut cache = self.incoming_share_cache.write().await;
            res.into_iter().for_each(|x| {
                cache.insert(
                    PathBuf::from(x.dst_path),
                    (FsUser(x.src_user), PathBuf::from(x.src_path)),
                );
            });
        }
        Ok(())
    }
    /// Returns directory entries for every share mounted directly inside
    /// `rel_root` (i.e. whose dst_path parent equals the listed directory).
    /// Uses nightly `btree_cursors` to scan the ordered cache from the
    /// first key >= the listed path.
    async fn shares(&self, rel_root: &DavPath) -> Result<Vec<Box<dyn DavDirEntry>>, Error> {
        let rel_dst_home = rel_root.as_pathbuf();
        // Make the DAV path relative (strip the leading "/").
        let rel_dst_home = rel_dst_home
            .strip_prefix("/")
            .map(|x| x.to_owned())
            .unwrap_or_else(|_| rel_dst_home);
        self.fill_cache().await?;
        let cache = self.incoming_share_cache.read().await;
        let mut cursor = cache.lower_bound(std::ops::Bound::Included(&rel_dst_home));
        let mut share_dir_entries: Vec<Box<dyn DavDirEntry>> = vec![];
        // Walk forward while the share's mount point is a direct child of
        // the listed directory.
        while let Some((dst_path, (src_user, src_path))) =
            cursor
                .key_value()
                .and_then(|x| match x.0.parent() == Some(&rel_dst_home) {
                    true => Some(x),
                    false => None,
                })
        {
            let name = dst_path.strip_prefix(&rel_dst_home).unwrap();
            // Resolve the share source inside the owner's home directory.
            let mut p = PathBuf::new();
            p.push(src_user.home_dir());
            p.push(src_path);
            // NOTE(review): panics if the share source vanished on disk.
            let meta = metadata(&p).await.unwrap();
            share_dir_entries.push(Box::new(OxiDirEntry {
                name: name.to_str().unwrap_or_default().into(),
                len: meta.len(),
                dir: p.is_dir(),
                etag: etag(&p, &meta),
                modified: meta.modified().ok(),
                created: meta.created().ok(),
            }));
            cursor.move_next();
        }
        Ok(share_dir_entries)
    }
    /// Maps a DAV path onto an absolute on-disk path, redirecting into the
    /// share owner's home directory when the path lies inside an incoming
    /// share; otherwise it resolves inside this user's own home directory.
    async fn resolve_path(&self, rel_root: &DavPath) -> Result<(PathMeta, PathBuf), Error> {
        let rel_dst_home = rel_root.as_pathbuf();
        // Make the DAV path relative (strip the leading "/").
        let rel_dst_home = rel_dst_home
            .strip_prefix("/")
            .map(|x| x.to_owned())
            .unwrap_or_else(|_| rel_dst_home);
        self.fill_cache().await?;
        let cache = self.incoming_share_cache.read().await;
        // Find a cached share whose dst_path is a prefix of the request.
        let cursor = cache.lower_bound(std::ops::Bound::Included(&rel_dst_home));
        let share = cursor
            .key_value()
            .and_then(|x| rel_dst_home.strip_prefix(x.0).map(|y| (y, x.1)).ok());
        if let Some((rel_mnt, (src_user, src_path))) = share {
            // Path lives inside a share: re-root it under the owner's home.
            let mut p = PathBuf::new();
            p.push(src_user.home_dir());
            p.push(src_path);
            p.push(rel_mnt);
            Ok((
                PathMeta {
                    is_share: true,
                    // NOTE(review): this compares the post-strip remainder to
                    // the full request path — it only holds when the matched
                    // prefix is empty; confirm the mount-point detection.
                    is_mount_point: rel_mnt == rel_dst_home,
                    owner: *src_user,
                    // NOTE(review): `Path::exists` is a blocking call inside
                    // an async fn.
                    exists: p.exists(),
                },
                p,
            ))
        } else {
            // Plain path inside the requesting user's own home directory.
            let mut p = PathBuf::new();
            p.push(&self.user.home_dir());
            p.push(rel_dst_home.clone());
            Ok((
                PathMeta {
                    is_share: false,
                    is_mount_point: false,
                    owner: self.user,
                    exists: p.exists(),
                },
                p,
            ))
        }
    }
}
/// Result of resolving a DAV path against the user's home dir and shares.
#[derive(Debug)]
pub struct PathMeta {
    // True when the path lies inside an incoming share.
    is_share: bool,
    // True when the path is exactly a share's mount point.
    is_mount_point: bool,
    // Filesystem owner of the resolved path.
    owner: FsUser,
    // Whether the resolved on-disk path currently exists.
    exists: bool,
}
/// Stream adapter over `tokio::fs::ReadDir` that resolves each entry's
/// metadata concurrently and yields completed `OxiDirEntry` values.
pub struct DirEntryStream<'a> {
    // Underlying directory iterator.
    entry_stream: ReadDir,
    // Set once `entry_stream` has returned `None`.
    entry_stream_completed: bool,
    // In-flight metadata futures, one per directory entry seen so far.
    intermediate: Vec<BoxFuture<'a, OxiDirEntry>>,
}
impl<'a> DirEntryStream<'a> {
    /// Starts reading `path`.
    /// NOTE(review): panics if the directory cannot be opened — TODO
    /// propagate the error instead.
    pub async fn read_dir(path: &Path) -> DirEntryStream<'a> {
        Self {
            entry_stream: read_dir(path).await.unwrap(),
            entry_stream_completed: false,
            intermediate: vec![],
        }
    }
}
impl<'a> Stream for DirEntryStream<'a> {
    type Item = OxiDirEntry;

    /// Pulls at most one new entry from the directory per poll and drives
    /// the per-entry metadata futures, yielding whichever finishes first.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        if !self.entry_stream_completed {
            match self.entry_stream.poll_next_entry(cx) {
                Poll::Ready(Ok(Some(entry))) => self.intermediate.push(Box::pin(async move {
                    let meta = entry.metadata().await.unwrap();
                    OxiDirEntry {
                        name: entry.file_name().to_str().unwrap_or_default().into(),
                        len: meta.len(),
                        dir: entry.file_type().await.unwrap().is_dir(),
                        etag: etag(&entry.path(), &meta),
                        modified: meta.modified().ok(),
                        created: meta.created().ok(),
                    }
                })),
                // BUG FIX: an `Err` was previously swallowed by a `_ => ()`
                // arm without registering a waker, which could leave the
                // stream pending forever. Treat it as end-of-directory.
                Poll::Ready(Ok(None)) | Poll::Ready(Err(_)) => {
                    self.entry_stream_completed = true;
                }
                Poll::Pending => (),
            }
        }
        // Return the first metadata future that is ready.
        for (i, intermediate) in self.intermediate.iter_mut().enumerate() {
            if let Poll::Ready(x) = intermediate.poll_unpin(cx) {
                self.intermediate.remove(i);
                return Poll::Ready(Some(x));
            }
        }
        // BUG FIX: only signal end-of-stream once the directory iterator is
        // exhausted. Previously an empty `intermediate` with a still-pending
        // `ReadDir` terminated the stream early, truncating listings.
        if self.intermediate.is_empty() && self.entry_stream_completed {
            Poll::Ready(None)
        } else {
            Poll::Pending
        }
    }
}
/// Directory entry with all metadata precomputed, so the `DavDirEntry`
/// and `DavMetaData` impls can answer without further I/O.
#[derive(Clone, Debug)]
pub struct OxiDirEntry {
    name: Box<str>,
    len: u64,
    dir: bool,
    etag: String,
    // `None` when the filesystem does not report the timestamp.
    modified: Option<SystemTime>,
    created: Option<SystemTime>,
}
impl DavDirEntry for OxiDirEntry {
    /// Raw (byte) file name of this entry.
    fn name(&self) -> Vec<u8> {
        Vec::from(self.name.as_bytes())
    }

    /// Metadata is precomputed, so this simply hands out a clone of `self`.
    fn metadata(&self) -> FsFuture<Box<dyn DavMetaData>> {
        let meta = self.clone();
        Box::pin(async move { Ok(Box::new(meta) as Box<dyn DavMetaData>) })
    }
}
impl DavMetaData for OxiDirEntry {
    /// File size in bytes (0 for directories as reported by the fs).
    fn len(&self) -> u64 {
        self.len
    }

    /// Modification time; GeneralFailure when the fs did not report one.
    fn modified(&self) -> FsResult<std::time::SystemTime> {
        match self.modified {
            Some(t) => Ok(t),
            None => Err(FsError::GeneralFailure),
        }
    }

    /// Creation time; GeneralFailure when the fs did not report one.
    fn created(&self) -> FsResult<std::time::SystemTime> {
        match self.created {
            Some(t) => Ok(t),
            None => Err(FsError::GeneralFailure),
        }
    }

    fn is_dir(&self) -> bool {
        self.dir
    }

    fn etag(&self) -> Option<String> {
        Some(self.etag.clone())
    }
}
/// Open file handle plus its path (kept for metadata/etag computation).
#[derive(Debug)]
pub struct OxiFile {
    path: PathBuf,
    // `Option` so async methods can temporarily move the handle out of
    // `self` into the returned future (see the `DavFile` impl).
    file: Option<File>,
}
impl OxiFile {
    /// Opens `path` with the given options; maps any I/O error to
    /// `FsError::GeneralFailure`.
    pub async fn new(path: PathBuf, options: &tokio::fs::OpenOptions) -> FsResult<Self> {
        let file = options
            .open(&path)
            .await
            .map_err(|_| FsError::GeneralFailure)?;
        Ok(Self {
            path,
            file: Some(file),
        })
    }
}
impl DavFile for OxiFile {
    /// Stats the backing path and derives length, dir flag and etag.
    /// NOTE(review): panics if the file vanished between open and stat.
    fn metadata(&mut self) -> FsFuture<Box<dyn DavMetaData>> {
        Box::pin(async move {
            let meta = metadata(&self.path).await.unwrap();
            let etag = etag(&self.path, &meta);
            let oxi_meta = OxiFileMeta {
                len: meta.len(),
                is_dir: meta.is_dir(),
                etag,
            };
            let oxi_meta: Box<dyn DavMetaData> = Box::new(oxi_meta);
            Ok(oxi_meta)
        })
    }
    /// Writes the whole buffer. The handle is moved out of `self` so the
    /// future can own it across await points, then restored.
    fn write_bytes(&mut self, buf: Bytes) -> FsFuture<()> {
        async move {
            let mut file = self.file.take().unwrap();
            let res = file.write_all(&buf).await;
            self.file = Some(file);
            res.map_err(|e| e.into())
        }
        .boxed()
    }
    /// Drains `buf` chunk by chunk; `write` may perform short writes, so
    /// the cursor is advanced by the number of bytes actually written.
    fn write_buf(&mut self, mut buf: Box<dyn Buf + Send>) -> FsFuture<()> {
        async move {
            let mut file = self.file.take().unwrap();
            while buf.remaining() > 0 {
                match file.write(buf.chunk()).await {
                    Ok(n) => buf.advance(n),
                    Err(e) => {
                        self.file = Some(file);
                        return Err(e.into());
                    }
                }
            }
            self.file = Some(file);
            Ok(())
        }
        .boxed()
    }
    /// Reads up to `count` bytes from the current position.
    ///
    /// BUG FIX: the original handed an *uninitialized* `BytesMut` (via
    /// `set_len` in an `unsafe` block) to `read`, which is unsound. A
    /// zero-initialized buffer is safe and behaviorally equivalent.
    fn read_bytes(&mut self, count: usize) -> FsFuture<Bytes> {
        async move {
            let mut file = self.file.take().unwrap();
            let mut buf = BytesMut::zeroed(count);
            let res = file.read(&mut buf).await.map(|n| {
                buf.truncate(n);
                buf.freeze()
            });
            self.file = Some(file);
            res.map_err(|e| e.into())
        }
        .boxed()
    }
    fn seek(&mut self, pos: SeekFrom) -> FsFuture<u64> {
        async move {
            let mut file = self.file.take().unwrap();
            let res = file.seek(pos).await;
            self.file = Some(file);
            res.map_err(|e| e.into())
        }
        .boxed()
    }
    fn flush(&mut self) -> FsFuture<()> {
        async move {
            let mut file = self.file.take().unwrap();
            let res = file.flush().await;
            self.file = Some(file);
            res.map_err(|e| e.into())
        }
        .boxed()
    }
}
/// Snapshot of a file's metadata taken by `OxiFile::metadata`.
#[derive(Clone, Debug)]
pub struct OxiFileMeta {
    len: u64,
    is_dir: bool,
    etag: String,
}
impl DavMetaData for OxiFileMeta {
    /// File size in bytes.
    fn len(&self) -> u64 {
        self.len
    }

    /// Modification time is not captured in this snapshot.
    fn modified(&self) -> FsResult<SystemTime> {
        Err(FsError::NotImplemented)
    }

    fn is_dir(&self) -> bool {
        self.is_dir
    }

    fn etag(&self) -> Option<String> {
        Some(self.etag.clone())
    }
}
/// Derives an entity tag from the file's path and modification time:
/// base64(md5(path_bytes || mtime_secs_ne_bytes)). A missing mtime is
/// encoded as zero, matching the previous behavior.
fn etag(path: &Path, meta: &Metadata) -> String {
    let secs = meta
        .modified()
        .ok()
        .and_then(|t| t.duration_since(UNIX_EPOCH).ok())
        .map_or(0, |d| d.as_secs());
    let mut input = path.to_str().unwrap_or_default().as_bytes().to_vec();
    input.extend_from_slice(&secs.to_ne_bytes());
    let digest: [u8; 16] = md5::compute(&input).into();
    base64std.encode(digest)
}

46
src/error.rs Normal file
View file

@ -0,0 +1,46 @@
use axum::{http::StatusCode, response::IntoResponse, Json};
use log::error;
use serde::Serialize;
use thiserror::Error;
/// Application-level error type, converted into HTTP responses below.
#[derive(Debug, Error)]
pub enum Error {
    #[error("internal server error")]
    InternalServerError,
    #[error("bad request")]
    BadRequest,
    #[error("csrf check failed")]
    CsrfCheckFailed,
    // Database failures convert automatically via `#[from]` and `?`.
    #[error("sqlx error: {0:?}")]
    Sqlx(#[from] sqlx::Error),
    // Basic-auth header payload was not valid base64.
    #[error("base64 error: {0:?}")]
    Base64User(base64::DecodeError),
}
impl IntoResponse for Error {
    /// Logs the error, then maps it to an HTTP response. Only the CSRF
    /// failure carries a JSON body; all other variants are bare statuses.
    fn into_response(self) -> axum::response::Response {
        error!("{:?}", self);
        let status = match &self {
            Self::CsrfCheckFailed => {
                return (
                    StatusCode::PRECONDITION_FAILED,
                    Json(JsonError {
                        message: "CSRF check failed".into(),
                    }),
                )
                    .into_response()
            }
            Self::Sqlx(_) | Self::InternalServerError => StatusCode::INTERNAL_SERVER_ERROR,
            Self::Base64User(_) | Self::BadRequest => StatusCode::BAD_REQUEST,
        };
        status.into_response()
    }
}
/// Minimal JSON error body: `{"message": "..."}`.
#[derive(Serialize)]
struct JsonError {
    message: Box<str>,
}

13
src/fs.rs Normal file
View file

@ -0,0 +1,13 @@
use std::path::PathBuf;
/// Newtype over the numeric user id, used to derive per-user paths on disk.
#[derive(Clone, Copy, Debug)]
pub struct FsUser(pub u32);
impl FsUser {
    /// Root directory of this user's files: `./data/files/<id>`.
    pub fn home_dir(&self) -> PathBuf {
        let mut dir = PathBuf::from("./data/files");
        dir.push(self.0.to_string());
        dir
    }
    /// Staging directory for chunked uploads: `./data/upload/<id>`.
    pub fn upload_dir(&self) -> PathBuf {
        let mut dir = PathBuf::from("./data/upload");
        dir.push(self.0.to_string());
        dir
    }
}

418
src/main.rs Normal file
View file

@ -0,0 +1,418 @@
#![feature(btree_cursors)]
use std::{env, path::PathBuf, sync::Arc};
use axum::{
async_trait,
body::Body,
debug_handler,
error_handling::HandleErrorLayer,
extract::{FromRequestParts, Path, Query, Request, State},
http::{
header::{AUTHORIZATION, CONTENT_TYPE, USER_AGENT},
request::Parts,
uri::PathAndQuery,
HeaderMap, StatusCode, Uri,
},
response::{IntoResponse, Redirect, Response},
routing::{any, get, head},
BoxError, Json, Router,
};
use axum_oidc::{
error::MiddlewareError, EmptyAdditionalClaims, OidcAuthLayer, OidcClaims, OidcLoginLayer,
};
use base64::{engine::general_purpose::STANDARD as base64std, Engine};
use bytes::Bytes;
use dav_fs::FilesFs;
use dotenvy::dotenv;
use fs::FsUser;
use log::debug;
use ocs::{OcsJson, OcsXml};
use rand::{distributions, Rng};
use serde::{Deserialize, Serialize};
use sha3::{Digest, Sha3_256};
use sqlx::{query, query_as, FromRow, MySqlPool};
use tokio::{fs::File, io::AsyncReadExt, net::TcpListener};
use tower::ServiceBuilder;
use tower_http::trace::TraceLayer;
use tower_sessions::{cookie::SameSite, MemoryStore, SessionManagerLayer};
use webdav_handler::{memfs::MemFs, memls::MemLs, DavConfig, DavHandler};
use crate::error::Error;
mod dav_fs;
mod error;
mod fs;
mod ocs;
mod upload_fs;
type HResult<T> = Result<T, Error>;
/// Shared application state cloned into every handler.
#[derive(Clone)]
pub struct AppState {
    // Pre-built WebDAV handler; per-request configs substitute the real fs.
    dav_server: Arc<DavHandler>,
    db: MySqlPool,
    // Salt for hashing share passwords (currently set to "" in `main`).
    share_password_salt: Arc<str>,
}
/// Entry point: loads configuration from the environment, wires the
/// session + OIDC middleware, builds the router and serves on port 8080.
#[tokio::main]
async fn main() {
    dotenv().ok();
    env_logger::init();
    let database_url = env::var("DATABASE_URL").expect("DATABASE_URL");
    let database_pool = MySqlPool::connect(&database_url)
        .await
        .expect("working database connection");
    let application_base = env::var("APPLICATION_BASE").expect("APPLICATION_BASE");
    let application_base =
        Uri::from_maybe_shared(application_base).expect("valid APPLICATION_BASE");
    let issuer = env::var("ISSUER").expect("ISSUER");
    let client_id = env::var("CLIENT_ID").expect("CLIENT_ID");
    let client_secret = env::var("CLIENT_SECRET").ok();
    // SCOPES is space-separated; an empty string yields one empty scope.
    let scopes: Vec<String> = env::var("SCOPES")
        .expect("SCOPES")
        .split(' ')
        .map(String::from)
        .collect::<Vec<_>>();
    // In-memory session store: all sessions are lost on restart.
    let session_store = MemoryStore::default();
    let session_service = ServiceBuilder::new()
        .layer(HandleErrorLayer::new(|_: BoxError| async {
            StatusCode::BAD_REQUEST
        }))
        .layer(SessionManagerLayer::new(session_store).with_same_site(SameSite::Lax));
    // Forces a login (redirect to the IdP) on the routes it wraps.
    let oidc_login_service = ServiceBuilder::new()
        .layer(HandleErrorLayer::new(|e: MiddlewareError| async {
            debug!("auth layer error {:?}", e);
            e.into_response(); //TODO return this response
            StatusCode::INTERNAL_SERVER_ERROR
        }))
        .layer(OidcLoginLayer::<EmptyAdditionalClaims>::new());
    // Extracts claims when a session exists, without forcing a login.
    let oidc_auth_service = ServiceBuilder::new()
        .layer(HandleErrorLayer::new(|e: MiddlewareError| async {
            debug!("auth layer error {:?}", e);
            e.into_response(); //TODO return this response
            StatusCode::INTERNAL_SERVER_ERROR
        }))
        .layer(
            OidcAuthLayer::<EmptyAdditionalClaims>::discover_client(
                application_base,
                issuer,
                client_id,
                client_secret,
                scopes,
            )
            .await
            .unwrap(),
        );
    let state = AppState {
        // Default in-memory fs/locks; DAV handlers pass per-request
        // configs that substitute the real filesystem (see `user_dav`).
        dav_server: Arc::new(
            DavHandler::builder()
                .filesystem(MemFs::new())
                .locksystem(MemLs::new())
                .build_handler(),
        ),
        db: database_pool,
        share_password_salt: "".into(),
    };
    let app = Router::new()
        .route("/index.php/login/flow", get(login_flow))
        .layer(oidc_login_service)
        .route("/index.php/204", get(connectivity_check))
        .route("/status.php", get(status))
        //.route("/remote.php/dav", any(remote_dav))
        .route(
            "/remote.php/dav",
            head(|| async { StatusCode::OK.into_response() }),
        )
        .nest_service(
            "/remote.php/dav/files/:user_id",
            any(user_dav).with_state(state.clone()),
        )
        .nest_service(
            "/remote.php/dav/upload/:user_id",
            any(upload_dav).with_state(state.clone()),
        )
        .nest("/ocs/v2.php", ocs::router(state.clone()))
        .route("/index.php/avatar/:user_id/512", get(avatar_512))
        .route("/index.php/avatar/:user_id/:size", get(|| async { "" }))
        .layer(oidc_auth_service)
        .layer(session_service)
        .layer(TraceLayer::new_for_http())
        .with_state(state);
    let listener = TcpListener::bind("[::]:8080").await.expect("valid address");
    axum::serve(listener, app).await.unwrap()
}
/// Connectivity probe (`/index.php/204`): always answers 204 No Content.
async fn connectivity_check() -> impl IntoResponse {
    StatusCode::NO_CONTENT
}
async fn avatar_512() -> Response {
let mut buf = vec![];
let mut image_file = File::open("static/ferris.png").await.unwrap();
image_file.read_to_end(&mut buf).await;
let buf = Bytes::from(buf);
Response::builder()
.header(CONTENT_TYPE, "image/png")
.body(Body::from(buf))
.unwrap()
}
/// Query parameter selecting the OCS response format (`json` or XML).
#[derive(Deserialize)]
struct Format {
    format: Option<String>,
}
/// Fallback OCS handler: answers "not implemented" in the format the
/// client asked for (`?format=json` or the XML default).
async fn ocs_default(Query(query): Query<Format>) -> Response {
    match query.format.as_deref() {
        Some("json") => OcsJson::not_implemented(()).into_response(),
        _ => OcsXml::not_implemented(()).into_response(),
    }
}
/// Payload for `/status.php`, mimicking Nextcloud's status document.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct Status {
    installed: bool,
    maintenance: bool,
    needs_db_upgrade: bool,
    version: Box<str>,
    version_string: Box<str>,
    edition: Box<str>,
    productname: Box<str>,
    extended_support: bool,
}
/// Serves `/status.php`: reports a fixed Nextcloud-compatible version so
/// the official clients accept this server.
async fn status() -> Json<Status> {
    debug!("GET /status.php");
    Json(Status {
        installed: true,
        maintenance: false,
        needs_db_upgrade: false,
        version: "27.1.3".into(),
        version_string: "27.1.3".into(),
        edition: "".into(),
        productname: "pfzettos Nextcloud Backend".into(),
        extended_support: false,
    })
}
/// Per-user WebDAV endpoint for `/remote.php/dav/files/:user_id`.
///
/// Re-prefixes the (nested, already stripped) request URI with the user
/// prefix — the DAV handler strips it again — and serves the request with
/// a `FilesFs` rooted in the authenticated user's home directory.
async fn user_dav(
    State(state): State<AppState>,
    user: User,
    Path(user_id): Path<u32>,
    mut req: Request,
) -> Response {
    // Users may only access their own DAV tree.
    if user_id != user.id {
        return StatusCode::UNAUTHORIZED.into_response();
    }
    let user_prefix = format!("/remote.php/dav/files/{}", user.id);
    // BUG FIX: the original unconditionally appended `?` even when no
    // query string was present, turning `/foo` into `/foo?`.
    let path = match req.uri().path_and_query() {
        Some(x) => match x.query() {
            Some(q) => format!("{}{}?{}", user_prefix, x.path(), q),
            None => format!("{}{}", user_prefix, x.path()),
        },
        None => format!("{}{}", user_prefix, req.uri().path()),
    }
    .parse()
    .unwrap();
    let mut parts = req.uri().clone().into_parts();
    parts.path_and_query = Some(path);
    *req.uri_mut() = Uri::from_parts(parts).unwrap();
    let dav_config = DavConfig::new()
        .strip_prefix(user_prefix)
        .autoindex(true, None)
        .filesystem(Box::new(FilesFs {
            user: FsUser(user.id),
            db: state.db.clone(),
            // Fresh per-request share cache.
            incoming_share_cache: Arc::default(),
        }));
    state
        .dav_server
        .handle_with(dav_config, req)
        .await
        .into_response()
}
/// Chunked-upload DAV endpoint (`/remote.php/dav/upload/:user_id`).
/// NOTE(review): backed by a fresh `MemFs` per request, so uploads appear
/// ephemeral — matches the unchecked "Large file upload" item in README.
async fn upload_dav(
    State(state): State<AppState>,
    user: User,
    Path(user_id): Path<u32>,
    req: Request,
) -> Response {
    // Users may only access their own upload space.
    if user_id != user.id {
        return StatusCode::UNAUTHORIZED.into_response();
    }
    let dav_config = DavConfig::new()
        .autoindex(true, None)
        .filesystem(MemFs::new());
    state
        .dav_server
        .handle_with(dav_config, req)
        .await
        .into_response()
}
/// Browser login flow used by the Nextcloud desktop/mobile clients.
///
/// Ensures a local user row exists for the OIDC subject, mints a fresh app
/// password (stored only as a SHA3-256 hash) and redirects back to the
/// client with the token credentials in the `nc://login/...` URL.
async fn login_flow(
    State(state): State<AppState>,
    headers: HeaderMap,
    OidcClaims(claims): OidcClaims<EmptyAdditionalClaims>,
) -> HResult<Redirect> {
    let user_id = query!(
        "SELECT id FROM users WHERE oidc_id = ?",
        claims.subject().to_string()
    )
    .fetch_optional(&state.db)
    .await?;
    let user_id = match user_id {
        Some(row) => row.id,
        None => {
            // First login: create the user inside a transaction so the
            // LAST_INSERT_ID lookup observes our own insert.
            let mut transaction = state.db.begin().await?;
            query!(
                "INSERT INTO users (oidc_id, name) VALUES (?, ?)",
                claims.subject().to_string(),
                claims
                    .preferred_username()
                    .map(|x| x.to_string())
                    .unwrap_or_default()
            )
            .execute(&mut *transaction)
            .await?;
            let id = query!("SELECT LAST_INSERT_ID() AS id")
                .fetch_one(&mut *transaction)
                .await?
                .id as u32;
            transaction.commit().await?;
            id
        }
    };
    // 64 random alphanumeric characters; only the hash is persisted.
    let user_token = rand::thread_rng()
        .sample_iter(distributions::Alphanumeric)
        .take(64)
        .map(char::from)
        .collect::<String>();
    let user_token_hash = {
        let mut token_hasher = Sha3_256::default();
        token_hasher.update(&user_token);
        token_hasher.finalize().to_vec()
    };
    // Token rows are named after the requesting user agent.
    let user_agent = headers
        .get(USER_AGENT)
        .and_then(|x| x.to_str().ok())
        .unwrap_or("");
    // BUG FIX: this row was previously inserted twice — once outside and
    // once inside the transaction — leaving an orphaned duplicate token
    // per login. Only the transactional insert is kept.
    let mut transaction = state.db.begin().await?;
    query!(
        "INSERT INTO user_tokens (user_id, name, hash) VALUES (?, ?, ?)",
        user_id,
        user_agent,
        user_token_hash
    )
    .execute(&mut *transaction)
    .await?;
    let token_id = query!("SELECT LAST_INSERT_ID() AS id")
        .fetch_one(&mut *transaction)
        .await?
        .id as u32;
    transaction.commit().await?;
    // NOTE(review): the server address is hard-coded to a LAN IP; it
    // should presumably be derived from APPLICATION_BASE.
    Ok(Redirect::temporary(&format!(
        "nc://login/server:http://10.50.10.2:8080&user:{}&password:{}",
        token_id, user_token
    )))
}
/// Lets generic code bounded on `S: AsRef<MySqlPool>` (e.g. the `User`
/// extractor) borrow the connection pool out of the application state.
impl AsRef<MySqlPool> for AppState {
    fn as_ref(&self) -> &MySqlPool {
        &self.db
    }
}
/// A locally registered account, backed by a row of the `users` table.
#[derive(FromRow, Clone)]
pub struct User {
    id: u32,           // primary key of the `users` row
    oidc_id: Box<str>, // OIDC subject this account was created from
    name: Box<str>,    // display name (preferred_username at first login)
}
impl User {
    /// Root directory holding this user's synced files.
    pub fn home_dir(&self) -> PathBuf {
        let dir = format!("./files/{}/", self.id);
        dir.into()
    }

    /// Scratch directory for this user's in-progress uploads.
    pub fn upload_dir(&self) -> PathBuf {
        let dir = format!("./upload/{}/", self.id);
        dir.into()
    }
}
#[async_trait]
impl<S: AsRef<MySqlPool> + Send + Sync> FromRequestParts<S> for User {
    // `Ok(status)` rejects with a bare HTTP status (e.g. 401), `Err(e)` with
    // an application error.
    type Rejection = Result<StatusCode, Error>;

    /// Authenticates HTTP Basic credentials of the form `token_id:token`
    /// against the `user_tokens` table (the secret is compared via its
    /// SHA3-256 hash) and resolves the owning `User`.
    async fn from_request_parts(parts: &mut Parts, state: &S) -> Result<Self, Self::Rejection> {
        let headers = HeaderMap::from_request_parts(parts, state)
            .await
            .map_err(|_| Err(Error::InternalServerError))?;
        let authorization = headers
            .get(AUTHORIZATION)
            .ok_or(Ok(StatusCode::UNAUTHORIZED))?;

        // Only `Basic` authentication is supported.
        let Some(encoded) = authorization
            .to_str()
            .unwrap_or_default()
            .strip_prefix("Basic ")
        else {
            return Err(Ok(StatusCode::UNAUTHORIZED));
        };

        let raw = base64std
            .decode(encoded)
            .map_err(|e| Err(Error::Base64User(e)))?;
        let credentials = std::str::from_utf8(&raw).unwrap_or_default();
        // Malformed credentials degrade to ("", "") and fail the lookup below.
        let (username, password) = credentials.split_once(':').unwrap_or_default();

        // Tokens are stored hashed; hash the presented secret for comparison.
        let password_hash = {
            let mut hasher = Sha3_256::default();
            hasher.update(password);
            hasher.finalize().to_vec()
        };

        let user = query_as!(User, "SELECT users.id, users.oidc_id, users.name FROM user_tokens INNER JOIN users ON user_tokens.user_id=users.id WHERE user_tokens.id = ? AND user_tokens.hash = ?", username, password_hash).fetch_optional(state.as_ref()).await.map_err(|x|Err(x.into()))?;
        user.ok_or(Err(Ok(StatusCode::UNAUTHORIZED))?)
    }
}

159
src/ocs.rs Normal file
View file

@ -0,0 +1,159 @@
use axum::{
http::header::CONTENT_TYPE,
response::{IntoResponse, Response},
routing::{get, post},
Router,
};
use serde::Serialize;
use crate::AppState;
pub mod core;
pub mod files_sharing;
pub mod provisioning_api;
/// Builds the OCS router exposing core status, provisioning and
/// file-sharing endpoints.
pub fn router(state: AppState) -> Router<AppState> {
    let shares = get(files_sharing::share::get_shares).post(files_sharing::share::create);
    let sharees = get(files_sharing::sharees::search);

    Router::new()
        .route("/cloud/capabilities", get(core::get_status))
        .route("/cloud/users", get(provisioning_api::get_users))
        .route("/cloud/user", get(provisioning_api::get_user))
        .route("/apps/files_sharing/api/v1/shares", shares)
        .route("/apps/files_sharing/api/v1/sharees", sharees)
        .with_state(state)
}
/// OCS envelope serialized as JSON (`{"ocs": {...}}`).
#[derive(Serialize)]
pub struct OcsJson<T: Serialize> {
    ocs: Ocs<T>,
}
/// OCS envelope serialized as XML (the inner `Ocs` renames the root to `ocs`).
#[derive(Serialize)]
pub struct OcsXml<T: Serialize>(Ocs<T>);
/// Payload of an OCS response: a status `meta` block plus arbitrary `data`.
#[derive(Serialize)]
#[serde(rename = "ocs")]
pub struct Ocs<T: Serialize> {
    pub meta: OcsMeta,
    pub data: T,
}
/// OCS status metadata accompanying every response.
#[derive(Serialize)]
pub struct OcsMeta {
    pub status: Box<str>,  // short status word, e.g. "ok"
    pub statuscode: u16,   // OCS status code (distinct from the HTTP status)
    pub message: Box<str>, // human-readable status message
    pub totalitems: Option<Box<str>>,   // pagination: total item count
    pub itemsperpage: Option<Box<str>>, // pagination: page size
}
impl<T: Serialize> OcsJson<T> {
    /// Wraps `data` in a successful (`ok`/200) envelope.
    pub fn ok(data: T) -> Self {
        Self::with_meta("ok", 200, data)
    }

    /// Wraps `data` in a "not found" envelope.  OCS carries the real outcome
    /// in `meta.statuscode`; code 200 here mirrors `OcsXml::not_found`.
    pub fn not_found(data: T) -> Self {
        Self::with_meta("not found", 200, data)
    }

    /// Wraps `data` in a "not implemented" (501) envelope.
    /// Fix: this was a copy-paste of `not_found` ("not found"/200); it now
    /// matches `OcsXml::not_implemented`.
    pub fn not_implemented(data: T) -> Self {
        Self::with_meta("not implemented", 501, data)
    }

    // Shared constructor: all current call sites use the status word as the
    // message too.
    fn with_meta(status: &str, statuscode: u16, data: T) -> Self {
        Self {
            ocs: Ocs {
                meta: OcsMeta {
                    status: status.into(),
                    statuscode,
                    message: status.into(),
                    totalitems: None,
                    itemsperpage: None,
                },
                data,
            },
        }
    }
}
impl<T: Serialize> OcsXml<T> {
pub fn ok(data: T) -> Self {
Self(Ocs {
meta: OcsMeta {
status: "ok".into(),
statuscode: 200,
message: "ok".into(),
totalitems: None,
itemsperpage: None,
},
data,
})
}
pub fn not_found(data: T) -> Self {
Self(Ocs {
meta: OcsMeta {
status: "not found".into(),
statuscode: 200,
message: "not found".into(),
totalitems: None,
itemsperpage: None,
},
data,
})
}
pub fn not_implemented(data: T) -> Self {
Self(Ocs {
meta: OcsMeta {
status: "not implemented".into(),
statuscode: 501,
message: "not implemented".into(),
totalitems: None,
itemsperpage: None,
},
data,
})
}
}
/// Serializes the envelope to JSON and tags it with the matching content type.
impl<T: Serialize> IntoResponse for OcsJson<T> {
    fn into_response(self) -> axum::response::Response {
        let json = serde_json::to_string(&self).unwrap();
        let response = Response::builder()
            .header(CONTENT_TYPE, "application/json")
            .body(json)
            .unwrap();
        response.into_response()
    }
}
/// Serializes the envelope to XML and tags it with the matching content type.
impl<T: Serialize> IntoResponse for OcsXml<T> {
    fn into_response(self) -> axum::response::Response {
        let xml = serde_xml_rs::to_string(&self).unwrap();
        let response = Response::builder()
            .header(CONTENT_TYPE, "application/xml; charset=utf-8")
            .body(xml)
            .unwrap();
        response.into_response()
    }
}

192
src/ocs/core.rs Normal file
View file

@ -0,0 +1,192 @@
use axum::http::HeaderMap;
use serde::Serialize;
use crate::{error::Error, HResult};
use super::OcsJson;
// https://docs.nextcloud.com/server/latest/developer_manual/_static/openapi.html#/operations/core-ocs-get-capabilities
pub async fn get_status(headers: HeaderMap) -> HResult<OcsJson<CapabilitiesData>> {
if let Some(Ok("true")) = headers.get("OCS-ApiRequest").map(|x| x.to_str()) {
Ok(OcsJson::ok(CapabilitiesData {
version: CapabilitiesVersion {
major: 0,
minor: 1,
micro: 0,
string: "0.1.0".into(),
edition: "".into(),
extended_support: false,
},
capabilities: Capabilities {
dav: DavCapabilities {
bulkupload: "1.0".into(),
chunking: "1.0".into(),
},
user_status: UserStatusCapabilities::default(),
weather_status: WeatherStatusCapabilities::default(),
files: FilesCapabilities::default(),
files_sharing: FilesSharingCapabilities {
api_enabled: true,
default_permissions: 31,
group_sharing: false,
public: FilesSharingPublicCapabilities {
enabled: true,
multiple_links: true,
send_mail: false,
upload: true,
upload_files_drop: true,
..Default::default()
},
resharing: true,
..Default::default()
},
},
}))
} else {
Err(Error::CsrfCheckFailed)
}
}
/// `data` payload of the capabilities response.
#[derive(Serialize)]
pub struct CapabilitiesData {
    version: CapabilitiesVersion,
    capabilities: Capabilities,
}
/// Server version block of the capabilities response.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct CapabilitiesVersion {
    major: i64,
    minor: i64,
    micro: i64,
    string: Box<str>, // full version string, e.g. "0.1.0"
    edition: Box<str>,
    extended_support: bool,
}
/// Per-feature capability blocks advertised to clients.
#[derive(Serialize)]
pub struct Capabilities {
    dav: DavCapabilities,
    user_status: UserStatusCapabilities,
    weather_status: WeatherStatusCapabilities,
    files: FilesCapabilities,
    files_sharing: FilesSharingCapabilities,
}
/// WebDAV feature versions (bulk upload / chunked upload).
#[derive(Serialize)]
pub struct DavCapabilities {
    bulkupload: Box<str>,
    chunking: Box<str>,
}
/// User-status app capabilities; `Default` disables everything.
#[derive(Serialize, Default)]
pub struct UserStatusCapabilities {
    enabled: bool,
    restore: bool,
    supports_emoji: bool,
}
/// Weather-status app capabilities; `Default` disables it.
#[derive(Serialize, Default)]
pub struct WeatherStatusCapabilities {
    enabled: bool,
}
/// Files app capabilities (chunking, comments, versioning, ...).
#[derive(Serialize)]
pub struct FilesCapabilities {
    bigfilechunking: bool,
    blacklisted_files: Box<[Box<str>]>, // filenames the server rejects
    comments: bool,
    undelete: bool,
    version_deletion: bool,
    version_labeling: bool,
    versioning: bool,
}
impl Default for FilesCapabilities {
    /// Chunked uploads on; every optional files feature off.
    fn default() -> Self {
        Self {
            bigfilechunking: true,
            blacklisted_files: Box::default(),
            comments: false,
            undelete: false,
            version_deletion: false,
            version_labeling: false,
            versioning: false,
        }
    }
}
/// Sharing capabilities; `Default` turns every sub-feature off.
#[derive(Serialize, Default)]
pub struct FilesSharingCapabilities {
    api_enabled: bool,
    default_permissions: u32, // permission bitmask (see `SharePermission`)
    federation: FilesSharingFederationCapabilities,
    group_sharing: bool,
    public: FilesSharingPublicCapabilities,
    resharing: bool,
    sharebymail: FilesSharingSharebymailCapabilities,
    sharee: FilesSharingShareeCapabilities,
    user: FilesSharingUserCapabilities,
}
/// Federated-sharing (cross-server) capabilities.
#[derive(Serialize, Default)]
pub struct FilesSharingFederationCapabilities {
    expire_date: Expire,
    expire_date_supported: Expire,
    incoming: bool,
    outgoing: bool,
}
/// Link-share ("public share") capabilities.
#[derive(Serialize, Default)]
pub struct FilesSharingPublicCapabilities {
    enabled: bool,
    expire_date: Expire,
    expire_date_internal: Expire,
    expire_date_remote: Expire,
    multiple_links: bool,
    send_mail: bool,
    upload: bool,
    upload_files_drop: bool, // anonymous "file drop" uploads
}
/// Share-by-mail capabilities.
#[derive(Serialize, Default)]
pub struct FilesSharingSharebymailCapabilities {
    enabled: bool,
    expire_date: Expire,
    // Fix: field was misspelled `send_passowrd_by_mail`, which made the
    // serialized capability key wrong (Nextcloud expects
    // `send_password_by_mail`).  The field is never referenced by name
    // elsewhere (constructed via `Default` only), so the rename is safe.
    send_password_by_mail: bool,
}
/// Direct user-to-user share capabilities.
#[derive(Serialize, Default)]
pub struct FilesSharingUserCapabilities {
    expire_date: Expire,
    send_mail: bool,
}
/// Sharee-search behaviour flags.
#[derive(Serialize, Default)]
pub struct FilesSharingShareeCapabilities {
    always_show_unique: bool,
    query_lookup_default: bool,
}
/// Wrapper used wherever a capability only reports "expiry enabled?".
#[derive(Serialize, Default)]
pub struct Expire {
    enabled: bool,
}
/*
//TODO: doesn't work
pub async fn get_avatar(Path((user_id, size)): Path<(u32, u32)>) -> impl IntoResponse {
let image = Reader::open("static/ferris.png").unwrap().decode().unwrap();
image.resize(size, size, FilterType::Nearest);
let mut buf = vec![];
image.write_to(&mut Cursor::new(&mut buf), ImageFormat::Png);
Response::builder()
.status(StatusCode::OK)
.header(CONTENT_TYPE, "image/png")
.header(
CONTENT_DISPOSITION,
format!("inline; filename=\"avatar.{size}.png\""),
)
.header("X-NC-IsCustomAvatar", "1")
.body(Body::from(buf))
.unwrap()
}
*/

187
src/ocs/files_sharing.rs Normal file
View file

@ -0,0 +1,187 @@
use serde::{Deserialize, Serialize};
pub mod share;
pub mod sharees;
/// Nextcloud share type; (de)serialized as the numeric OCS `share_type`
/// value by the manual `Serialize`/`Deserialize` impls in this module.
#[derive(Debug, Default, PartialEq, Eq)]
pub enum ShareType {
    /// Fallback for unrecognized numeric values (serialized as -1).
    #[default]
    Unknown,
    User,
    Group,
    Usergroup,
    Link,
    Email,
    #[deprecated]
    Contact,
    Remote,
    Circle,
    Guest,
    RemoteGroup,
    Room,
    UserRoom,
    Deck,
    DeckUser,
    Sciencemesh,
}
impl<'de> Deserialize<'de> for ShareType {
    /// Decodes the numeric OCS share type; unknown codes (including the
    /// unassigned value 14) collapse to `ShareType::Unknown`.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let code = i32::deserialize(deserializer)?;
        let share_type = match code {
            0 => Self::User,
            1 => Self::Group,
            2 => Self::Usergroup,
            3 => Self::Link,
            4 => Self::Email,
            5 => Self::Contact,
            6 => Self::Remote,
            7 => Self::Circle,
            8 => Self::Guest,
            9 => Self::RemoteGroup,
            10 => Self::Room,
            11 => Self::UserRoom,
            12 => Self::Deck,
            13 => Self::DeckUser,
            15 => Self::Sciencemesh,
            _ => Self::Unknown,
        };
        Ok(share_type)
    }
}
impl Serialize for ShareType {
    /// Encodes the share type as its numeric OCS value (`Unknown` → -1).
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let code: i32 = match self {
            Self::Unknown => -1,
            Self::User => 0,
            Self::Group => 1,
            Self::Usergroup => 2,
            Self::Link => 3,
            Self::Email => 4,
            Self::Contact => 5,
            Self::Remote => 6,
            Self::Circle => 7,
            Self::Guest => 8,
            Self::RemoteGroup => 9,
            Self::Room => 10,
            Self::UserRoom => 11,
            Self::Deck => 12,
            Self::DeckUser => 13,
            Self::Sciencemesh => 15,
        };
        code.serialize(serializer)
    }
}
impl ShareType {
    /// Every concrete share type — all variants except `Unknown` and the
    /// deprecated `Contact` — used as the default sharee-search filter.
    pub fn all() -> Box<[Self]> {
        Box::new([
            Self::User,
            Self::Group,
            Self::Usergroup,
            Self::Link,
            Self::Email,
            Self::Remote,
            Self::Circle,
            Self::Guest,
            Self::RemoteGroup,
            Self::Room,
            Self::UserRoom,
            Self::Deck,
            Self::DeckUser,
            Self::Sciencemesh,
        ])
    }
}
/// One bit of the OCS share-permission bitmask.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum SharePermission {
    Read = 1,
    Update = 2,
    Create = 4,
    Delete = 8,
    Share = 16,
}
/// Set of granted permissions; converted to/from the OCS `u8` bitmask by the
/// `From` impls in this module.
#[derive(Debug, Default, Clone)]
pub struct SharePermissions(pub Box<[SharePermission]>);
impl SharePermissions {
    /// Read + update + share (bitmask 19) — the fallback set used when a
    /// client sends no usable `permissions` value.
    pub fn rus() -> Self {
        let granted = [
            SharePermission::Read,
            SharePermission::Update,
            SharePermission::Share,
        ];
        Self(granted.into())
    }
}
impl From<u8> for SharePermissions {
    /// Expands an OCS permission bitmask into the set of granted permissions
    /// (in Read, Update, Create, Delete, Share order).
    fn from(value: u8) -> Self {
        let flags = [
            (1u8, SharePermission::Read),
            (2, SharePermission::Update),
            (4, SharePermission::Create),
            (8, SharePermission::Delete),
            (16, SharePermission::Share),
        ];
        let granted: Vec<_> = flags
            .iter()
            .filter(|(bit, _)| value & bit != 0)
            .map(|(_, permission)| permission.clone())
            .collect();
        Self(granted.into())
    }
}
impl From<&SharePermissions> for u8 {
    /// Collapses a permission set back into the OCS bitmask.  Like the
    /// original additive loop, duplicate entries are counted twice.
    fn from(value: &SharePermissions) -> Self {
        value
            .0
            .iter()
            .map(|permission| match permission {
                SharePermission::Read => 1,
                SharePermission::Update => 2,
                SharePermission::Create => 4,
                SharePermission::Delete => 8,
                SharePermission::Share => 16,
            })
            .sum()
    }
}
impl<'de> Deserialize<'de> for SharePermissions {
    /// Reads the OCS bitmask; any malformed value falls back to the
    /// read/update/share default instead of failing the whole request.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let permissions = match u8::deserialize(deserializer) {
            Ok(bits) => bits.into(),
            Err(_) => Self::rus(),
        };
        Ok(permissions)
    }
}
impl Serialize for SharePermissions {
    /// Writes the set as its OCS bitmask.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let bits = u8::from(self);
        bits.serialize(serializer)
    }
}

View file

@ -0,0 +1,154 @@
use axum::{
extract::State,
response::{IntoResponse, Response},
Form,
};
use serde::{Deserialize, Serialize};
use sqlx::query;
use time::{macros::format_description, Time};
use crate::{error::Error, ocs::OcsXml, AppState, HResult, User};
use super::{SharePermission, SharePermissions, ShareType};
/// Form body of `POST .../files_sharing/api/v1/shares`.
///
/// Field names follow the Nextcloud OCS API; missing fields fall back to
/// their serde defaults.
#[derive(Deserialize, Debug)]
pub struct CreateShareData {
    attributes: Option<Box<str>>, // NOTE(review): currently unused
    #[serde(default, rename = "expireDate")]
    expire_date: Option<Box<str>>, // not yet parsed (see create_user_share)
    #[serde(default)]
    label: Box<str>,
    #[serde(default)]
    note: Box<str>,
    #[serde(default)]
    password: Box<str>,
    path: Option<Box<str>>, // shared file/folder path
    #[serde(default = "SharePermissions::rus")]
    permissions: SharePermissions,
    #[serde(default, rename = "publicUpload")]
    public_upload: bool,
    #[serde(default, rename = "shareType")]
    share_type: ShareType,
    #[serde(rename = "shareWith")]
    share_with: Option<Box<str>>, // receiver (user id for user shares)
}
/// `POST .../files_sharing/api/v1/shares` — dispatches on the requested
/// share type.
///
/// Only user shares are implemented; every other type currently panics via
/// `todo!()`.
pub async fn create(
    State(state): State<AppState>,
    user: User,
    Form(form): Form<CreateShareData>,
) -> HResult<Response> {
    match form.share_type {
        ShareType::User => Ok(create_user_share(&state, &user, &form)
            .await?
            .into_response()),
        _ => todo!(),
    }
}
/// Creates a direct user-to-user share.
///
/// NOTE(review): work in progress — the actual INSERT is commented out, so
/// `LAST_INSERT_ID()` reads whatever this connection inserted last (0 on a
/// fresh connection) and the returned share id is not meaningful yet.  The
/// commented query also executes on `state.db` instead of the open
/// transaction; fix both when enabling it.
async fn create_user_share(
    state: &AppState,
    user: &User,
    data: &CreateShareData,
) -> HResult<OcsXml<UserShare>> {
    // shareWith is required
    let share_with = match &data.share_with {
        Some(x) => x.as_ref(),
        None => return Err(Error::BadRequest),
    };
    // check if user_to is valid
    if query!("SELECT id FROM users WHERE id = ?", share_with)
        .fetch_optional(&state.db)
        .await?
        .is_none()
    {
        return Err(Error::BadRequest);
    }
    //TODO: parse expire_date
    let expires_at: Option<Time> = None;
    //TODO: change to owner of orig if resharing
    let dst_user_id = user.id;
    let dst_user_name = user.name.clone();
    //TODO: check permissions of user on data.path
    let share_id = {
        let mut transaction = state.db.begin().await?;
        //query!("INSERT INTO user_shares (dst_user_id, from_user_id, to_user_id, expires_at, note, path, permissions) VALUES (?, ?, ?, ?, ?, ?, ?)", &dst_user_id, &user.id, &share_with, expires_at, &data.note, &data.path, u8::from(&data.permissions)).execute(&state.db).await?;
        let row = query!("SELECT LAST_INSERT_ID() as id")
            .fetch_one(&mut *transaction)
            .await?;
        transaction.commit().await?;
        row.id
    };
    Ok(OcsXml::ok(UserShare {
        id: share_id,
        share_type: ShareType::User,
        uid_owner: user.id,
        displayname_owner: user.name.clone(),
        permissions: data.permissions.clone(),
        can_edit: data.permissions.0.contains(&SharePermission::Update),
        can_delete: data.permissions.0.contains(&SharePermission::Delete),
        // Expiry formatted as "YYYY-MM-DD hh:mm:ss"; currently always None.
        expiration: expires_at.and_then(|x| {
            x.format(format_description!(
                "[year]-[month]-[day] [hour]:[minute]:[second]"
            ))
            .ok()
            .map(|x| x.into())
        }),
        uid_file_owner: dst_user_id,
        note: data.note.clone(),
        label: data.label.clone(),
        displayname_file_owner: dst_user_name,
        ..Default::default()
    }))
}
/// XML payload describing a created share; the field set mirrors Nextcloud's
/// share element.  Fields typed `()` are placeholders that are not filled in
/// yet.
#[derive(Default, Serialize)]
struct UserShare {
    id: u64,
    share_type: ShareType,
    uid_owner: u32,               // id of the sharing user
    displayname_owner: Box<str>,
    permissions: SharePermissions,
    can_edit: bool,
    can_delete: bool,
    stime: (),
    parent: Box<str>,
    expiration: Option<Box<str>>, // formatted "YYYY-MM-DD hh:mm:ss"
    token: Box<str>,
    uid_file_owner: u32,
    note: Box<str>,
    label: Box<str>,
    displayname_file_owner: Box<str>,
    path: Box<str>,
    item_type: Box<str>,
    item_permissions: u8,
    mimetype: Box<str>,
    has_preview: bool,
    storage_id: Box<str>,
    storage: Box<str>,
    item_source: (),
    file_source: (),
    file_parent: (),
    file_target: (),
    item_size: (),
    item_mtime: (),
    share_with: Box<str>,
    share_with_displayname: Box<str>,
    share_with_displayname_unique: Box<str>,
    status: (),
    mail_send: bool,
    hide_download: bool,
    attributes: (),
    tags: Box<[()]>,
}
/// `GET .../files_sharing/api/v1/shares` — listing is not implemented yet;
/// answers with an empty successful OCS envelope.
pub async fn get_shares() -> HResult<OcsXml<()>> {
    Ok(OcsXml::ok(()))
}

View file

@ -0,0 +1,170 @@
use axum::{
extract::{Query, State},
response::IntoResponse,
};
use serde::{Deserialize, Serialize};
use sqlx::query;
use crate::{
ocs::{OcsJson, OcsXml},
AppState, HResult, User,
};
use super::ShareType;
/// Query parameters of the sharee search endpoint.
#[derive(Debug, Deserialize)]
pub struct SearchQuery {
    #[serde(rename = "itemType")]
    item_type: Option<Box<str>>, // NOTE(review): currently unused
    #[serde(default)]
    lookup: bool, // global-lookup flag (currently unused)
    #[serde(default = "default_page")]
    page: u64, // NOTE(review): pagination is not yet applied to the queries
    #[serde(rename = "perPage", default = "default_per_page")]
    per_page: u64,
    search: Box<str>, // search term
    #[serde(default = "default_share_types")]
    share_type: Box<[ShareType]>, // which share targets to search
}
// Serde default: search results start at page 1.
fn default_page() -> u64 {
    1
}
// Serde default: up to 200 results per page.
fn default_per_page() -> u64 {
    200
}
// Serde default: search across every known share type.
fn default_share_types() -> Box<[ShareType]> {
    ShareType::all()
}
pub async fn search(
State(state): State<AppState>,
user: User,
Query(query): Query<SearchQuery>,
) -> HResult<OcsJson<SearchResult>> {
let mut transaction = state.db.begin().await?;
let (rough_users, exact_users) = if query.share_type.contains(&ShareType::User) {
let rough = query!(
"SELECT id, name FROM users WHERE MATCH(name) AGAINST (? IN NATURAL LANGUAGE MODE) AND name != ?",
&query.search,
&query.search,
)
.fetch_all(&mut *transaction)
.await?
.into_iter()
.map(|x| SearchUser {
icon: "user-icon".into(),
label: x.name.clone().into(),
share_with_displayname_unique: x.name.into(),
value: SearchUserValue {
share_type: ShareType::User,
share_with: x.id.to_string().into(),
},
..Default::default()
})
.collect();
let exact = query!("SELECT id, name FROM users WHERE name = ?", &query.search)
.fetch_all(&mut *transaction)
.await?
.into_iter()
.map(|x| SearchUser {
icon: "user-icon".into(),
label: x.name.clone().into(),
share_with_displayname_unique: x.name.into(),
value: SearchUserValue {
share_type: ShareType::User,
share_with: x.id.to_string().into(),
},
..Default::default()
})
.collect();
(rough, exact)
} else {
(vec![], vec![])
};
transaction.commit().await?;
Ok(OcsJson::ok(SearchResult {
users: rough_users.into(),
exact: ExactSearchResult {
users: exact_users.into(),
..Default::default()
},
..Default::default()
}))
}
/// Top-level sharee search response; unimplemented categories stay empty.
#[derive(Serialize, Default)]
pub struct SearchResult {
    circles: Box<[()]>,
    emails: Box<[()]>,
    exact: ExactSearchResult, // exact-name matches
    groups: Box<[()]>,
    lookup: Box<[()]>,
    #[serde(rename = "lookupEnabled")]
    lookup_enabled: bool,
    remote_groups: Box<[()]>,
    remotes: Box<[()]>,
    rooms: Box<[()]>,
    users: Box<[SearchUser]>, // fuzzy matches
}
/// Exact-match bucket of the sharee search response.
#[derive(Serialize, Default)]
pub struct ExactSearchResult {
    circles: Box<[()]>,
    emails: Box<[()]>,
    groups: Box<[()]>,
    lookup: Box<[()]>,
    remote_groups: Box<[()]>,
    remotes: Box<[()]>,
    rooms: Box<[()]>,
    users: Box<[SearchUser]>,
}
/// One user entry in the sharee search results.
#[derive(Default, Serialize)]
pub struct SearchUser {
    icon: Box<str>,
    label: Box<str>, // display name shown to the searching client
    #[serde(rename = "shareWithDisplayNameUnique")]
    share_with_displayname_unique: Box<str>,
    status: SearchUserStatus,
    subline: Box<str>,
    value: SearchUserValue, // what the client submits when creating the share
}
/// Presence information attached to a sharee entry.
#[derive(Serialize)]
pub struct SearchUserStatus {
    #[serde(rename = "clearAt")]
    clear_at: (),
    icon: (),
    message: (),
    status: Box<str>, // e.g. "offline"
}
impl Default for SearchUserStatus {
    /// Presence is not tracked, so every sharee reports as "offline".
    fn default() -> Self {
        Self {
            status: "offline".into(),
            clear_at: (),
            icon: (),
            message: (),
        }
    }
}
/// Identifies the share target the client should submit back.
#[derive(Default, Serialize)]
pub struct SearchUserValue {
    #[serde(rename = "shareType")]
    share_type: ShareType,
    #[serde(rename = "shareWith")]
    share_with: Box<str>, // user id, stringified
}

126
src/ocs/provisioning_api.rs Normal file
View file

@ -0,0 +1,126 @@
use serde::Serialize;
use crate::{HResult, User};
use super::OcsJson;
/// `GET /cloud/users` (provisioning API).
///
/// NOTE(review): placeholder — returns a hard-coded "demo" entry instead of
/// querying the database; the `user` extractor only serves as an auth gate.
pub async fn get_users(user: User) -> HResult<OcsJson<OcsUsers>> {
    Ok(OcsJson::ok(OcsUsers {
        users: vec!["demo".into()].into(),
    }))
}
/// `data` payload of the user-list endpoint.
#[derive(Serialize)]
pub struct OcsUsers {
    users: Box<[Box<str>]>, // user ids
}
/// `GET /cloud/user` — profile of the authenticated user.
///
/// Unlisted fields fall back to `OcsUser::default()`; the language, groups
/// and last-login values are currently hard-coded placeholders.
pub async fn get_user(user: User) -> HResult<OcsJson<OcsUser>> {
    let display_name = user.name.clone();
    let profile = OcsUser {
        id: user.id.to_string().into(),
        language: "de".into(),
        enabled: Some(true),
        profile_enabled: "1".into(),
        display_name,
        display_name_2: user.name,
        groups: vec!["demo".into()].into(),
        last_login: 0,
        ..Default::default()
    };
    Ok(OcsJson::ok(profile))
}
/// Full user profile returned by the provisioning API; most fields are
/// placeholders populated via `Default`.  The display name is serialized
/// under two keys ("display-name" and "displayname").
#[derive(Serialize, Default)]
pub struct OcsUser {
    additional_mail: Box<[Box<str>]>,
    #[serde(rename = "additional_mailScope")]
    additional_mail_scope: Box<[OcsScope]>,
    address: Box<str>,
    #[serde(rename = "addressScope")]
    address_scope: OcsScope,
    #[serde(rename = "avatarScope")]
    avatar_scope: OcsScope,
    backend: Box<str>,
    #[serde(rename = "backendCapabilities")]
    backend_capabilities: OcsBackendCapabilities,
    biography: Box<str>,
    #[serde(rename = "biographyScope")]
    biography_scope: OcsScope,
    #[serde(rename = "display-name")]
    display_name: Box<str>,
    #[serde(rename = "displayname")]
    display_name_2: Box<str>,
    #[serde(rename = "displaynameScope")]
    display_name_scope: OcsScope,
    email: Option<Box<str>>,
    #[serde(rename = "emailScope")]
    email_scope: OcsScope,
    enabled: Option<bool>,
    fediverse: Box<str>,
    #[serde(rename = "fediverseScope")]
    fediverse_scope: OcsScope,
    groups: Box<[Box<str>]>,
    headline: Box<str>,
    #[serde(rename = "headlineScope")]
    headline_scope: OcsScope,
    id: Box<str>,
    language: Box<str>,
    #[serde(rename = "lastLogin")]
    last_login: i64,
    locale: Box<str>,
    manager: Box<str>,
    notify_email: Option<Box<str>>,
    organisation: Box<str>,
    #[serde(rename = "organisationScope")]
    organisation_scope: OcsScope,
    phone: Box<str>,
    #[serde(rename = "phoneScope")]
    phone_scope: OcsScope,
    profile_enabled: Box<str>, // "1"/"0" as a string, matching Nextcloud
    #[serde(rename = "profile_enabledScope")]
    profile_enabled_scope: OcsScope,
    quota: OcsQuota,
    role: Box<str>,
    #[serde(rename = "roleScope")]
    role_scope: OcsScope,
    #[serde(rename = "storageLocation")]
    storage_location: Box<str>,
    subadmin: Box<[Box<str>]>,
    twitter: Box<str>,
    #[serde(rename = "twitterScope")]
    twitter_scope: OcsScope,
    website: Box<str>,
    #[serde(rename = "websiteScope")]
    website_scope: OcsScope,
}
/// What the user backend allows clients to change.
#[derive(Serialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct OcsBackendCapabilities {
    set_display_name: bool,
    set_password: bool,
}
/// Visibility scope of a profile field (Nextcloud "v2" scope values).
#[derive(Serialize, Default)]
pub enum OcsScope {
    #[serde(rename = "v2-private")]
    Private,
    #[default]
    #[serde(rename = "v2-local")]
    Local,
    #[serde(rename = "v2-federated")]
    Federated,
    #[serde(rename = "v2-published")]
    Published,
}
/// Storage quota block of the user profile.
#[derive(Serialize, Default)]
pub struct OcsQuota {
    free: u64,
    quota: i64, // signed — presumably negative encodes "unlimited"; confirm
    relative: f32,
    total: u64,
    used: u32, // NOTE(review): u32 caps at ~4 GiB while the others are u64
}

111
src/upload_fs.rs Normal file
View file

@ -0,0 +1,111 @@
use webdav_handler::fs::DavFileSystem;
/// Stub DAV filesystem intended for chunked uploads; every operation in its
/// `DavFileSystem` impl is still `todo!()`.
#[derive(Clone)]
pub struct UploadFs {}
// Work-in-progress `DavFileSystem` backend for the upload endpoint.  Every
// operation panics via `todo!()` except `symlink_metadata` (delegates to
// `metadata`, i.e. symlinks are not distinguished) and `have_props` (dead
// properties are reported as unsupported).
impl DavFileSystem for UploadFs {
    fn open<'a>(
        &'a self,
        path: &'a webdav_handler::davpath::DavPath,
        options: webdav_handler::fs::OpenOptions,
    ) -> webdav_handler::fs::FsFuture<Box<dyn webdav_handler::fs::DavFile>> {
        todo!()
    }
    fn read_dir<'a>(
        &'a self,
        path: &'a webdav_handler::davpath::DavPath,
        meta: webdav_handler::fs::ReadDirMeta,
    ) -> webdav_handler::fs::FsFuture<
        webdav_handler::fs::FsStream<Box<dyn webdav_handler::fs::DavDirEntry>>,
    > {
        todo!()
    }
    fn metadata<'a>(
        &'a self,
        path: &'a webdav_handler::davpath::DavPath,
    ) -> webdav_handler::fs::FsFuture<Box<dyn webdav_handler::fs::DavMetaData>> {
        todo!()
    }
    // Symlinks are treated like their targets.
    fn symlink_metadata<'a>(
        &'a self,
        path: &'a webdav_handler::davpath::DavPath,
    ) -> webdav_handler::fs::FsFuture<Box<dyn webdav_handler::fs::DavMetaData>> {
        self.metadata(path)
    }
    fn create_dir<'a>(
        &'a self,
        path: &'a webdav_handler::davpath::DavPath,
    ) -> webdav_handler::fs::FsFuture<()> {
        todo!()
    }
    fn remove_dir<'a>(
        &'a self,
        path: &'a webdav_handler::davpath::DavPath,
    ) -> webdav_handler::fs::FsFuture<()> {
        todo!()
    }
    fn remove_file<'a>(
        &'a self,
        path: &'a webdav_handler::davpath::DavPath,
    ) -> webdav_handler::fs::FsFuture<()> {
        todo!()
    }
    fn rename<'a>(
        &'a self,
        from: &'a webdav_handler::davpath::DavPath,
        to: &'a webdav_handler::davpath::DavPath,
    ) -> webdav_handler::fs::FsFuture<()> {
        todo!()
    }
    fn copy<'a>(
        &'a self,
        from: &'a webdav_handler::davpath::DavPath,
        to: &'a webdav_handler::davpath::DavPath,
    ) -> webdav_handler::fs::FsFuture<()> {
        todo!()
    }
    // Dead (custom) WebDAV properties are not supported.
    fn have_props<'a>(
        &'a self,
        path: &'a webdav_handler::davpath::DavPath,
    ) -> std::pin::Pin<Box<dyn futures::prelude::Future<Output = bool> + Send + 'a>> {
        Box::pin(futures::prelude::future::ready(false))
    }
    fn patch_props<'a>(
        &'a self,
        path: &'a webdav_handler::davpath::DavPath,
        patch: Vec<(bool, webdav_handler::fs::DavProp)>,
    ) -> webdav_handler::fs::FsFuture<Vec<(axum::http::StatusCode, webdav_handler::fs::DavProp)>>
    {
        todo!()
    }
    fn get_props<'a>(
        &'a self,
        path: &'a webdav_handler::davpath::DavPath,
        do_content: bool,
    ) -> webdav_handler::fs::FsFuture<Vec<webdav_handler::fs::DavProp>> {
        todo!()
    }
    fn get_prop<'a>(
        &'a self,
        path: &'a webdav_handler::davpath::DavPath,
        prop: webdav_handler::fs::DavProp,
    ) -> webdav_handler::fs::FsFuture<xmltree::Element> {
        todo!()
    }
    fn get_quota(&self) -> webdav_handler::fs::FsFuture<(u64, Option<u64>)> {
        todo!()
    }
}

BIN
static/ferris.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 61 KiB