Monorepo Integration: Unified Backend, Frontend & Documentation

- Reorganize project into monorepo structure
  - backend/app/ - New FastAPI backend (modular with src/)
  - backend/legacy/ - Legacy database modules (relational & vector)
  - frontend/ - React text editor application

- Add launcher.py for easy full-stack startup (a sketch follows below)
- Complete documentation in README.md
  - Quick start guide
  - API endpoints reference
  - Development setup
  - Troubleshooting

- Refactor main.py to 35 lines (app configuration only)
- Update .gitignore for full-stack project
- Add CHANGELOG.md with version history (v0.1.0-v0.1.1)

Structure is now clean and ready for team collaboration.
Author: Krzysztof Cieślik
Date: 2026-04-09 17:06:59 +02:00
parent fddaad962b
commit 6bbb24e633
55 changed files with 808 additions and 93 deletions
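
launcher.py itself is not among the files shown below. A minimal sketch of what such a launcher might look like, assuming the backend runs under uvicorn from backend/app/ and the frontend dev server starts with `npm run dev` in frontend/ (both are assumptions, not code from this commit):

```python
# Hypothetical sketch of launcher.py; commands and paths are assumptions.
import subprocess
import sys
from pathlib import Path

ROOT = Path(__file__).parent


def main():
    # Start the FastAPI backend (assumed entry point: backend/app/main.py).
    backend = subprocess.Popen(
        [sys.executable, "-m", "uvicorn", "main:app", "--port", "8000"],
        cwd=ROOT / "backend" / "app",
    )
    # Start the React dev server (assumed to live in frontend/).
    frontend = subprocess.Popen(["npm", "run", "dev"], cwd=ROOT / "frontend")
    try:
        backend.wait()
        frontend.wait()
    except KeyboardInterrupt:
        backend.terminate()
        frontend.terminate()


if __name__ == "__main__":
    main()
```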

backend/app/main.py

@@ -0,0 +1,35 @@
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

from src.config import ALLOWED_ORIGINS
from src.database import init_db
from src.routers import init, login, status

app = FastAPI(
    title="Archivium Local Backend",
    description="Local archive encryption and authentication system",
    version="0.1.0",
)

app.add_middleware(
    CORSMiddleware,
    allow_origins=ALLOWED_ORIGINS,
    allow_credentials=True,
    allow_methods=["POST", "GET"],
    allow_headers=["Content-Type"],
)

app.include_router(init.router)
app.include_router(login.router)
app.include_router(status.router)


@app.on_event("startup")
def startup():
    """Initialize database on startup."""
    init_db()


if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="127.0.0.1", port=8000)

backend/app/pyproject.toml

@@ -0,0 +1,19 @@
[project]
name = "archivium-backend"
version = "0.1.0"
description = "Local archive encryption and authentication system"
requires-python = ">=3.9,<3.13"
dependencies = [
    "fastapi>=0.104.0",
    "uvicorn[standard]>=0.24.0",
    "pydantic>=2.5.0",
    "sqlalchemy>=2.0.0",
    "passlib[argon2]>=1.7.4",
]

[project.optional-dependencies]
dev = [
    "pytest>=7.0.0",
    "pytest-asyncio>=0.21.0",
    "httpx>=0.25.0",
]

backend/app/src/__init__.py

@@ -0,0 +1 @@
"""Archivium Backend Application."""

backend/app/src/config.py

@@ -0,0 +1,10 @@
import os

DB_PATH = os.getenv("DATABASE_PATH", "archivium.db")

ALLOWED_ORIGINS = os.getenv(
    "ALLOWED_ORIGINS", "http://localhost:3000,http://localhost:5173"
).split(",")

if os.getenv("ENVIRONMENT") == "development":
    ALLOWED_ORIGINS = ["http://localhost:3000", "http://localhost:5173"]
elif os.getenv("ENVIRONMENT") == "production":
    ALLOWED_ORIGINS = os.getenv("CORS_ORIGINS", "").split(",")

backend/app/src/database.py

@@ -0,0 +1,28 @@
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from .config import DB_PATH
from .models import Base

DATABASE_URL = f"sqlite:///{DB_PATH}"

engine = create_engine(
    DATABASE_URL,
    connect_args={"check_same_thread": False},
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)


def init_db():
    """Initialize database schema."""
    Base.metadata.create_all(bind=engine)


def get_db():
    """Provide database session for dependency injection."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()

backend/app/src/models.py

@@ -0,0 +1,13 @@
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()


class SecurityConfig(Base):
    """Storage for password and recovery key hashes."""

    __tablename__ = "security_config"

    id = Column(Integer, primary_key=True, index=True)
    password_hash = Column(String, nullable=False)
    recovery_key_hash = Column(String, nullable=False)

backend/app/src/routers/__init__.py

@@ -0,0 +1 @@
"""Routers for Archivium Backend."""

backend/app/src/routers/init.py

@@ -0,0 +1,39 @@
from fastapi import APIRouter, HTTPException, Depends
from sqlalchemy.orm import Session

from ..database import get_db
from ..models import SecurityConfig
from ..schemas import InitRequest
from ..security import hash_password, generate_recovery_key

router = APIRouter(prefix="/api", tags=["init"])


@router.post("/init")
def initialize_system(request: InitRequest, db: Session = Depends(get_db)):
    """Initialize system with master password and generate recovery key."""
    # The SQLite file is created by init_db() on startup, so checking for
    # the file would always succeed; check for an existing config row instead.
    if db.query(SecurityConfig).first() is not None:
        raise HTTPException(
            status_code=400,
            detail="System already initialized",
        )

    recovery_key = generate_recovery_key()
    hashed_password = hash_password(request.password)
    hashed_recovery = hash_password(recovery_key)

    db.add(
        SecurityConfig(
            password_hash=hashed_password,
            recovery_key_hash=hashed_recovery,
        )
    )
    db.commit()

    return {
        "status": "success",
        "recovery_key": recovery_key,
        "message": "System initialized. Save the recovery key in a safe place.",
    }

backend/app/src/routers/login.py

@@ -0,0 +1,50 @@
from fastapi import APIRouter, HTTPException, Depends
from sqlalchemy.orm import Session

from ..database import get_db
from ..models import SecurityConfig
from ..schemas import LoginRequest
from ..security import verify_password

router = APIRouter(prefix="/api", tags=["login"])


@router.post("/login")
def login(request: LoginRequest, db: Session = Depends(get_db)):
    """Authenticate with master password or recovery key."""
    # The database file always exists once the app has started, so detect
    # the uninitialized state by the absence of a configuration row.
    config = db.query(SecurityConfig).first()
    if not config:
        raise HTTPException(
            status_code=404,
            detail="System not initialized",
        )

    if request.is_recovery:
        if not verify_password(request.password, config.recovery_key_hash):
            raise HTTPException(
                status_code=401,
                detail="Invalid recovery key",
            )
        return {
            "status": "success",
            "message": "Authenticated with recovery key. Please change password.",
        }

    if not verify_password(request.password, config.password_hash):
        raise HTTPException(
            status_code=401,
            detail="Invalid password",
        )

    return {
        "status": "success",
        "message": "Successfully authenticated",
    }

backend/app/src/routers/status.py

@@ -0,0 +1,12 @@
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session

from ..database import get_db
from ..models import SecurityConfig

router = APIRouter(prefix="/api", tags=["status"])


@router.get("/status")
def get_status(db: Session = Depends(get_db)):
    """Check if system is initialized."""
    # The database file is created on startup, so report initialization
    # based on whether a configuration row exists rather than file presence.
    return {"is_initialized": db.query(SecurityConfig).first() is not None}

backend/app/src/schemas.py

@@ -0,0 +1,10 @@
from pydantic import BaseModel, Field


class InitRequest(BaseModel):
    password: str = Field(..., min_length=8, max_length=128)


class LoginRequest(BaseModel):
    password: str = Field(..., min_length=1, max_length=128)
    is_recovery: bool = False

backend/app/src/security.py

@@ -0,0 +1,20 @@
import secrets

from passlib.hash import argon2


def hash_password(password: str) -> str:
    """Hash password using Argon2id."""
    return argon2.using(type="ID").hash(password)


def verify_password(password: str, hash_value: str) -> bool:
    """Verify password against hash."""
    try:
        return argon2.using(type="ID").verify(password, hash_value)
    except Exception:
        return False


def generate_recovery_key() -> str:
    """Generate random recovery key (32 hex characters)."""
    return secrets.token_hex(16)
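
For illustration, a round trip through these helpers (the hash in the comment is abbreviated; real Argon2 output varies with the salt):

```python
>>> from security import hash_password, verify_password, generate_recovery_key
>>> h = hash_password("hunter22")   # e.g. '$argon2id$v=19$m=65536,t=3,p=4$...'
>>> verify_password("hunter22", h)
True
>>> verify_password("wrong", h)
False
>>> generate_recovery_key()         # 32 hex characters, e.g. '3f9a...'
```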

1_Pooling/config.json (sentence-transformers pooling config)

@@ -0,0 +1,10 @@
{
  "word_embedding_dimension": 384,
  "pooling_mode_cls_token": false,
  "pooling_mode_mean_tokens": true,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false,
  "pooling_mode_weightedmean_tokens": false,
  "pooling_mode_lasttoken": false,
  "include_prompt": true
}

README.md (all-MiniLM-L6-v2 model card)

@@ -0,0 +1,173 @@
---
language: en
license: apache-2.0
library_name: sentence-transformers
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
datasets:
- s2orc
- flax-sentence-embeddings/stackexchange_xml
- ms_marco
- gooaq
- yahoo_answers_topics
- code_search_net
- search_qa
- eli5
- snli
- multi_nli
- wikihow
- natural_questions
- trivia_qa
- embedding-data/sentence-compression
- embedding-data/flickr30k-captions
- embedding-data/altlex
- embedding-data/simple-wiki
- embedding-data/QQP
- embedding-data/SPECTER
- embedding-data/PAQ_pairs
- embedding-data/WikiAnswers
pipeline_tag: sentence-similarity
---
# all-MiniLM-L6-v2
This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for tasks like clustering or semantic search.
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
embeddings = model.encode(sentences)
print(embeddings)
```
## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first, pass your input through the transformer model, then apply the right pooling operation on top of the contextualized word embeddings.
```python
from transformers import AutoTokenizer, AutoModel
import torch
import torch.nn.functional as F


# Mean pooling: take the attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # first element contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')
model = AutoModel.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

# Normalize embeddings
sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1)

print("Sentence embeddings:")
print(sentence_embeddings)
```
------
## Background
The project aims to train sentence embedding models on very large sentence-level datasets using a self-supervised
contrastive learning objective. We used the pretrained [`nreimers/MiniLM-L6-H384-uncased`](https://huggingface.co/nreimers/MiniLM-L6-H384-uncased) model and fine-tuned it on a
dataset of 1B sentence pairs. We use a contrastive learning objective: given a sentence from the pair, the model should predict which of a set of randomly sampled other sentences was actually paired with it in our dataset.
We developed this model during the
[Community week using JAX/Flax for NLP & CV](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104),
organized by Hugging Face. We developed this model as part of the project:
[Train the Best Sentence Embedding Model Ever with 1B Training Pairs](https://discuss.huggingface.co/t/train-the-best-sentence-embedding-model-ever-with-1b-training-pairs/7354). We benefited from efficient hardware infrastructure to run the project: 7 TPU v3-8 devices, as well as guidance from Google's Flax, JAX, and Cloud team members on efficient deep learning frameworks.
## Intended uses
Our model is intended to be used as a sentence and short paragraph encoder. Given an input text, it outputs a vector which captures
the semantic information. The sentence vector may be used for information retrieval, clustering or sentence similarity tasks.
By default, input text longer than 256 word pieces is truncated.
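For illustration, a small sentence-similarity example using `sentence_transformers.util.cos_sim` (which ships with the library); the sentences here are made up:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

query = model.encode("How do I bake bread?", convert_to_tensor=True)
docs = model.encode(
    ["Recipe for sourdough loaves", "History of the Roman Empire"],
    convert_to_tensor=True,
)

# Cosine similarity between the query and each document vector.
scores = util.cos_sim(query, docs)  # shape (1, 2); higher means more similar
print(scores)
```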
## Training procedure
### Pre-training
We use the pretrained [`nreimers/MiniLM-L6-H384-uncased`](https://huggingface.co/nreimers/MiniLM-L6-H384-uncased) model. Please refer to the model card for more detailed information about the pre-training procedure.
### Fine-tuning
We fine-tune the model using a contrastive objective. Formally, we compute the cosine similarity between every possible sentence pair in the batch.
We then apply the cross-entropy loss by comparing with the true pairs.
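In other words, each sentence's true partner is the positive and every other in-batch pairing acts as a negative. A minimal PyTorch sketch of this in-batch objective (the scale factor of 20 is an assumption, not a value stated here):

```python
import torch
import torch.nn.functional as F


def in_batch_contrastive_loss(emb_a, emb_b, scale=20.0):
    """emb_a[i] and emb_b[i] are embeddings of the i-th true pair."""
    emb_a = F.normalize(emb_a, p=2, dim=1)
    emb_b = F.normalize(emb_b, p=2, dim=1)
    # (batch, batch) matrix of scaled cosine similarities between all pairs.
    logits = emb_a @ emb_b.T * scale
    # The true pair for row i sits in column i.
    labels = torch.arange(emb_a.size(0), device=emb_a.device)
    return F.cross_entropy(logits, labels)
```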
#### Hyperparameters
We trained our model on a TPU v3-8 for 100k steps using a batch size of 1024 (128 per TPU core).
We used a learning rate warm-up of 500 steps. The sequence length was limited to 128 tokens. We used the AdamW optimizer with
a 2e-5 learning rate. The full training script is accessible in this current repository: `train_script.py`.
#### Training data
We use the concatenation of multiple datasets to fine-tune our model. The total number of sentence pairs is above 1 billion.
We sampled each dataset with a weighted probability; the configuration is detailed in the `data_config.json` file.
| Dataset | Paper | Number of training tuples |
|--------------------------------------------------------|:----------------------------------------:|:--------------------------:|
| [Reddit comments (2015-2018)](https://github.com/PolyAI-LDN/conversational-datasets/tree/master/reddit) | [paper](https://arxiv.org/abs/1904.06472) | 726,484,430 |
| [S2ORC](https://github.com/allenai/s2orc) Citation pairs (Abstracts) | [paper](https://aclanthology.org/2020.acl-main.447/) | 116,288,806 |
| [WikiAnswers](https://github.com/afader/oqa#wikianswers-corpus) Duplicate question pairs | [paper](https://doi.org/10.1145/2623330.2623677) | 77,427,422 |
| [PAQ](https://github.com/facebookresearch/PAQ) (Question, Answer) pairs | [paper](https://arxiv.org/abs/2102.07033) | 64,371,441 |
| [S2ORC](https://github.com/allenai/s2orc) Citation pairs (Titles) | [paper](https://aclanthology.org/2020.acl-main.447/) | 52,603,982 |
| [S2ORC](https://github.com/allenai/s2orc) (Title, Abstract) | [paper](https://aclanthology.org/2020.acl-main.447/) | 41,769,185 |
| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) (Title, Body) pairs | - | 25,316,456 |
| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) (Title+Body, Answer) pairs | - | 21,396,559 |
| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) (Title, Answer) pairs | - | 21,396,559 |
| [MS MARCO](https://microsoft.github.io/msmarco/) triplets | [paper](https://doi.org/10.1145/3404835.3462804) | 9,144,553 |
| [GOOAQ: Open Question Answering with Diverse Answer Types](https://github.com/allenai/gooaq) | [paper](https://arxiv.org/pdf/2104.08727.pdf) | 3,012,496 |
| [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) (Title, Answer) | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 1,198,260 |
| [Code Search](https://huggingface.co/datasets/code_search_net) | - | 1,151,414 |
| [COCO](https://cocodataset.org/#home) Image captions | [paper](https://link.springer.com/chapter/10.1007%2F978-3-319-10602-1_48) | 828,395|
| [SPECTER](https://github.com/allenai/specter) citation triplets | [paper](https://doi.org/10.18653/v1/2020.acl-main.207) | 684,100 |
| [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) (Question, Answer) | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 681,164 |
| [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) (Title, Question) | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 659,896 |
| [SearchQA](https://huggingface.co/datasets/search_qa) | [paper](https://arxiv.org/abs/1704.05179) | 582,261 |
| [Eli5](https://huggingface.co/datasets/eli5) | [paper](https://doi.org/10.18653/v1/p19-1346) | 325,475 |
| [Flickr 30k](https://shannon.cs.illinois.edu/DenotationGraph/) | [paper](https://transacl.org/ojs/index.php/tacl/article/view/229/33) | 317,695 |
| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) Duplicate questions (titles) | | 304,525 |
| AllNLI ([SNLI](https://nlp.stanford.edu/projects/snli/) and [MultiNLI](https://cims.nyu.edu/~sbowman/multinli/)) | [paper SNLI](https://doi.org/10.18653/v1/d15-1075), [paper MultiNLI](https://doi.org/10.18653/v1/n18-1101) | 277,230 |
| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) Duplicate questions (bodies) | | 250,519 |
| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) Duplicate questions (titles+bodies) | | 250,460 |
| [Sentence Compression](https://github.com/google-research-datasets/sentence-compression) | [paper](https://www.aclweb.org/anthology/D13-1155/) | 180,000 |
| [Wikihow](https://github.com/pvl/wikihow_pairs_dataset) | [paper](https://arxiv.org/abs/1810.09305) | 128,542 |
| [Altlex](https://github.com/chridey/altlex/) | [paper](https://aclanthology.org/P16-1135.pdf) | 112,696 |
| [Quora Question Triplets](https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs) | - | 103,663 |
| [Simple Wikipedia](https://cs.pomona.edu/~dkauchak/simplification/) | [paper](https://www.aclweb.org/anthology/P11-2117/) | 102,225 |
| [Natural Questions (NQ)](https://ai.google.com/research/NaturalQuestions) | [paper](https://transacl.org/ojs/index.php/tacl/article/view/1455) | 100,231 |
| [SQuAD2.0](https://rajpurkar.github.io/SQuAD-explorer/) | [paper](https://aclanthology.org/P18-2124.pdf) | 87,599 |
| [TriviaQA](https://huggingface.co/datasets/trivia_qa) | - | 73,346 |
| **Total** | | **1,170,060,424** |

config.json (transformer model config)

@@ -0,0 +1,30 @@
{
  "add_cross_attention": false,
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": null,
  "classifier_dropout": null,
  "dtype": "float32",
  "eos_token_id": null,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 384,
  "initializer_range": 0.02,
  "intermediate_size": 1536,
  "is_decoder": false,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 6,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "tie_word_embeddings": true,
  "transformers_version": "5.3.0",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 30522
}

config_sentence_transformers.json

@@ -0,0 +1,14 @@
{
  "__version__": {
    "sentence_transformers": "5.3.0",
    "transformers": "5.3.0",
    "pytorch": "2.10.0+cpu"
  },
  "model_type": "SentenceTransformer",
  "prompts": {
    "query": "",
    "document": ""
  },
  "default_prompt_name": null,
  "similarity_fn_name": "cosine"
}

Binary file not shown.

modules.json

@@ -0,0 +1,20 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  },
  {
    "idx": 2,
    "name": "2",
    "path": "2_Normalize",
    "type": "sentence_transformers.models.Normalize"
  }
]

sentence_bert_config.json

@@ -0,0 +1,4 @@
{
  "max_seq_length": 256,
  "do_lower_case": false
}

File diff suppressed because it is too large

tokenizer_config.json

@@ -0,0 +1,23 @@
{
  "backend": "tokenizers",
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": true,
  "is_local": false,
  "mask_token": "[MASK]",
  "max_length": 128,
  "model_max_length": 256,
  "never_split": null,
  "pad_to_multiple_of": null,
  "pad_token": "[PAD]",
  "pad_token_type_id": 0,
  "padding_side": "right",
  "sep_token": "[SEP]",
  "stride": 0,
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "truncation_side": "right",
  "truncation_strategy": "longest_first",
  "unk_token": "[UNK]"
}

backend/legacy/ (relational database module)

@@ -0,0 +1,101 @@
import sqlite3
import json
import os

from fastapi import FastAPI, Body, HTTPException
from fastapi.middleware.cors import CORSMiddleware
import uvicorn

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DB_FILE = os.path.join(BASE_DIR, "archivium.db")


def get_db_connection():
    conn = sqlite3.connect(DB_FILE)
    conn.execute("PRAGMA journal_mode=WAL;")
    conn.row_factory = sqlite3.Row
    return conn


def init_db():
    with get_db_connection() as conn:
        conn.execute("""
            CREATE TABLE IF NOT EXISTS archive (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                filename TEXT UNIQUE,
                ocr_text TEXT,
                metadata TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)
        conn.commit()


init_db()


@app.post("/save-document")
async def save_document(data: dict = Body(...)):
    title = data.get("title")
    content = data.get("content")
    if not title or content is None:
        raise HTTPException(status_code=400, detail="Missing title or content")
    content_str = json.dumps(content)
    try:
        with get_db_connection() as conn:
            conn.execute("""
                INSERT INTO archive (filename, ocr_text)
                VALUES (?, ?)
                ON CONFLICT(filename) DO UPDATE SET ocr_text = excluded.ocr_text
            """, (title, content_str))
            conn.commit()
        return {"status": "success"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/load-document")
async def load_document(title: str = None):
    with get_db_connection() as conn:
        if title:
            row = conn.execute(
                "SELECT filename, ocr_text FROM archive WHERE filename = ?", (title,)
            ).fetchone()
        else:
            row = conn.execute(
                "SELECT filename, ocr_text FROM archive ORDER BY id DESC LIMIT 1"
            ).fetchone()
    if row:
        try:
            content_val = json.loads(row['ocr_text'])
        except (json.JSONDecodeError, TypeError):
            # Older rows may hold plain text rather than JSON.
            content_val = row['ocr_text']
        return {"title": row['filename'], "content": content_val}
    raise HTTPException(status_code=404, detail="Document not found")


if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=8000)
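
For reference, a hedged example of calling the save and load endpoints above (field names come from the code; the URL and payload are illustrative):

```python
import httpx

# Save (or upsert) a document; "content" may be any JSON-serializable value.
httpx.post(
    "http://127.0.0.1:8000/save-document",
    json={"title": "notes.txt", "content": {"blocks": ["first draft"]}},
)

# Load a document by title, or omit the parameter for the most recent one.
doc = httpx.get("http://127.0.0.1:8000/load-document").json()
print(doc["title"], doc["content"])
```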

backend/legacy/ (vector database module)

@@ -0,0 +1,134 @@
import sqlite3
import os

import numpy as np
from fastapi import FastAPI, Body, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from sentence_transformers import SentenceTransformer
import uvicorn

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DB_FILE = os.path.join(BASE_DIR, "assets.db")
MODEL_DIR = os.path.join(BASE_DIR, "local_model_miniLM")

# Download the embedding model once, then load it from the local copy.
if not os.path.exists(MODEL_DIR):
    model = SentenceTransformer('all-MiniLM-L6-v2')
    model.save(MODEL_DIR)
else:
    model = SentenceTransformer(MODEL_DIR)


def get_db_connection():
    conn = sqlite3.connect(DB_FILE)
    conn.execute("PRAGMA journal_mode=WAL;")
    conn.row_factory = sqlite3.Row
    return conn


def init_db():
    with get_db_connection() as conn:
        conn.execute("""
            CREATE TABLE IF NOT EXISTS documents (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                title TEXT UNIQUE,
                content BLOB,
                content_type TEXT,
                embedding BLOB
            )
        """)
        conn.commit()


init_db()


@app.post("/save-document")
async def save_document(
    title: str = Body(...),
    content: str = Body(...),
    content_type: str = Body("text/plain"),
):
    vector = model.encode(f"{title} {content}").astype(np.float32).tobytes()
    try:
        with get_db_connection() as conn:
            conn.execute("""
                INSERT INTO documents (title, content, content_type, embedding)
                VALUES (?, ?, ?, ?)
                ON CONFLICT(title) DO UPDATE SET
                    content = excluded.content,
                    content_type = excluded.content_type,
                    embedding = excluded.embedding
            """, (title, content.encode('utf-8'), content_type, vector))
            conn.commit()
        return {"status": "success", "message": f"Document '{title}' saved."}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/search")
async def search_similar(query: str = Body(..., embed=True), top_k: int = 3):
    """Semantic vector search over all stored documents."""
    query_vector = model.encode(query).astype(np.float32)
    with get_db_connection() as conn:
        cursor = conn.execute("SELECT title, content, embedding FROM documents")
        rows = cursor.fetchall()
    results = []
    for row in rows:
        db_vector = np.frombuffer(row['embedding'], dtype=np.float32)
        # Cosine similarity between the query and the stored embedding.
        score = np.dot(query_vector, db_vector) / (
            np.linalg.norm(query_vector) * np.linalg.norm(db_vector)
        )
        results.append({
            "title": row['title'],
            "content": row['content'].decode('utf-8', errors='ignore'),
            "score": float(score),
        })
    results = sorted(results, key=lambda x: x['score'], reverse=True)[:top_k]
    return {"results": results}


@app.get("/load-document")
async def load_document(title: str = None):
    with get_db_connection() as conn:
        if title:
            row = conn.execute(
                "SELECT title, content FROM documents WHERE title = ?", (title,)
            ).fetchone()
        else:
            row = conn.execute(
                "SELECT title, content FROM documents ORDER BY id DESC LIMIT 1"
            ).fetchone()
    if row:
        return {
            "title": row['title'],
            "content": row['content'].decode('utf-8', errors='ignore'),
        }
    return {"error": "Document not found"}


if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=8000)
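
The /search endpoint is a brute-force scan: it embeds the query and computes cosine similarity against every stored row in Python, which is fine for a small local archive but scales linearly with document count. A hedged example call (parameter names come from the signature above; `embed=True` means the query travels in the JSON body, while top_k is a query parameter):

```python
import httpx

resp = httpx.post(
    "http://127.0.0.1:8000/search",
    json={"query": "invoices from last spring"},
    params={"top_k": 5},
)
for hit in resp.json()["results"]:
    print(f"{hit['score']:.3f}  {hit['title']}")
```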