feat: Lobster memory sync system, complete version

Features:
- File tree display
- Diff comparison
- Two-way sync (local <-> database)
- Version history tracking
- Statistics display

Core patches:
1. Chunked reads and streaming transfer (prevents memory spikes on large files)
2. .lobsterignore mechanism (excludes temporary files)
3. Operation traceability (an audit log that records sync history)

Tech stack:
- Backend: Django + DRF + PostgreSQL
- Frontend: React + Ant Design
- Deployment: Docker + Docker Compose

The project is fully set up for deployment and can be started directly with docker-compose up -d
道童
2026-04-05 12:43:24 +00:00
commit 4374379d3f
26 changed files with 3270 additions and 0 deletions
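
No docker-compose.yml appears among the diffs below, so the exact service layout is not shown here; what follows is a minimal sketch consistent with this commit's Dockerfile (port 8087) and settings.py (DB_* and LOBSTER_MEMORY_BASE environment variables). Service names, volumes, and credentials are illustrative assumptions, not part of the commit.

# Hypothetical docker-compose.yml sketch, not part of this commit
services:
  db:
    image: postgres:15
    environment:
      POSTGRES_DB: lobster_memory
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
  backend:
    build: ./backend
    ports:
      - "8087:8087"
    environment:
      DB_HOST: db
      LOBSTER_MEMORY_BASE: /data/daotong
    volumes:
      - ./daotong:/data/daotong
    depends_on:
      - db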

backend/Dockerfile

@@ -0,0 +1,17 @@
FROM python:3.11-slim
WORKDIR /app
# Install dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code
COPY . .
# Collect static files
RUN python manage.py collectstatic --noinput
EXPOSE 8087
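# NOTE: runserver is Django's development server; a production image would typically run gunicorn or uwsgi instead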
CMD ["python", "manage.py", "runserver", "0.0.0.0:8087"]

backend/manage.py

@@ -0,0 +1,23 @@
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'memory_sync.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()

backend/memory_app/models.py

@@ -0,0 +1,125 @@
from django.db import models
from django.core.validators import FileExtensionValidator
import hashlib
class LobsterMemory(models.Model):
"""龙虾记忆文件模型"""
STATUS_CHOICES = [
('consistent', '一致'),
('local_newer', '本地更新'),
('db_newer', '数据库更新'),
('conflict', '冲突'),
]
lobster_id = models.CharField(max_length=50, help_text='龙虾ID')
file_path = models.CharField(max_length=500, help_text='文件相对路径')
content = models.TextField(help_text='文件内容')
hash = models.CharField(max_length=64, help_text='SHA256哈希')
status = models.CharField(
max_length=20,
choices=STATUS_CHOICES,
default='consistent',
help_text='同步状态'
)
version = models.IntegerField(default=1, help_text='版本号')
size = models.IntegerField(default=0, help_text='文件大小(字节)')
created_at = models.DateTimeField(auto_now_add=True, help_text='创建时间')
updated_at = models.DateTimeField(auto_now=True, help_text='更新时间')
class Meta:
db_table = 'lobster_memory'
unique_together = ('lobster_id', 'file_path', 'version')
ordering = ['-updated_at']
indexes = [
models.Index(fields=['lobster_id', 'file_path']),
models.Index(fields=['status']),
models.Index(fields=['updated_at']),
]
def __str__(self):
return f"{self.lobster_id}/{self.file_path} (v{self.version})"
def compute_hash(self, content):
"""计算SHA256哈希"""
return hashlib.sha256(content.encode('utf-8')).hexdigest()
def save(self, *args, **kwargs):
"""保存时自动计算哈希和大小"""
if self.content:
self.hash = self.compute_hash(self.content)
self.size = len(self.content.encode('utf-8'))
super().save(*args, **kwargs)
class SyncHistory(models.Model):
"""同步操作历史记录"""
    ACTION_CHOICES = [
        ('sync_to_db', 'Sync to database'),
        ('sync_to_local', 'Sync to local'),
        ('auto_sync', 'Auto sync'),
        ('manual_merge', 'Manual merge'),
    ]
    STATUS_CHOICES = [
        ('success', 'Success'),
        ('failed', 'Failed'),
        ('partial', 'Partial success'),
    ]
    lobster_id = models.CharField(max_length=50, help_text='Lobster ID')
    file_path = models.CharField(max_length=500, help_text='File path relative to the memory base')
    action = models.CharField(
        max_length=20,
        choices=ACTION_CHOICES,
        help_text='Action type'
    )
    status = models.CharField(
        max_length=20,
        choices=STATUS_CHOICES,
        help_text='Action status'
    )
    old_version = models.IntegerField(null=True, blank=True, help_text='Version before the operation')
    new_version = models.IntegerField(null=True, blank=True, help_text='Version after the operation')
    old_hash = models.CharField(max_length=64, null=True, blank=True, help_text='Hash before the operation')
    new_hash = models.CharField(max_length=64, null=True, blank=True, help_text='Hash after the operation')
    file_size = models.IntegerField(default=0, help_text='File size in bytes')
    operator = models.CharField(max_length=50, default='system', help_text='Operator')
    error_message = models.TextField(null=True, blank=True, help_text='Error message')
    execution_time = models.FloatField(default=0, help_text='Execution time in seconds')
    created_at = models.DateTimeField(auto_now_add=True, help_text='Operation time')
class Meta:
db_table = 'sync_history'
ordering = ['-created_at']
indexes = [
models.Index(fields=['lobster_id', 'file_path']),
models.Index(fields=['action']),
models.Index(fields=['status']),
models.Index(fields=['created_at']),
]
def __str__(self):
return f"{self.action} - {self.lobster_id}/{self.file_path} ({self.status})"

backend/memory_app/serializers.py

@@ -0,0 +1,64 @@
from rest_framework import serializers
from .models import LobsterMemory, SyncHistory
class LobsterMemorySerializer(serializers.ModelSerializer):
"""龙虾记忆序列化器"""
class Meta:
model = LobsterMemory
fields = [
'id',
'lobster_id',
'file_path',
'content',
'hash',
'status',
'version',
'size',
'created_at',
'updated_at',
]
read_only_fields = ['id', 'created_at', 'updated_at']
class SyncHistorySerializer(serializers.ModelSerializer):
"""同步历史序列化器"""
action_display = serializers.CharField(source='get_action_display', read_only=True)
status_display = serializers.CharField(source='get_status_display', read_only=True)
class Meta:
model = SyncHistory
fields = [
'id',
'lobster_id',
'file_path',
'action',
'action_display',
'status',
'status_display',
'old_version',
'new_version',
'old_hash',
'new_hash',
'file_size',
'operator',
'error_message',
'execution_time',
'created_at',
]
read_only_fields = ['id', 'created_at']
class FileDiffSerializer(serializers.Serializer):
"""文件差异序列化器"""
file_path = serializers.CharField()
lobster_id = serializers.CharField()
local_content = serializers.CharField(required=False)
db_content = serializers.CharField(required=False)
local_hash = serializers.CharField(required=False)
db_hash = serializers.CharField(required=False)
status = serializers.CharField()
message = serializers.CharField(required=False)

backend/memory_app/services.py

@@ -0,0 +1,496 @@
import os
import hashlib
import fnmatch
from pathlib import Path
from typing import List, Dict, Tuple, Iterator
from django.conf import settings
class IgnorePattern:
""".lobsterignore 模式匹配器"""
def __init__(self, base_dir: Path):
self.base_dir = base_dir
self.patterns = []
self.load_patterns()
    def load_patterns(self):
        """Load patterns from the .lobsterignore file, then append defaults."""
        self.patterns = []  # reset so reloads do not accumulate duplicates
        ignore_file = self.base_dir / '.lobsterignore'
        if ignore_file.exists():
            with open(ignore_file, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    # Skip blank lines and comments
                    if line and not line.startswith('#'):
                        self.patterns.append(line)
        # Append default ignore rules
        default_patterns = [
            '.DS_Store', '.git', '.gitignore', '__pycache__',
            'node_modules', '*.pyc', '*.pyo', '*.log',
            '*.tmp', '*.temp', '*.bak', '.vscode', '.idea'
        ]
        for pattern in default_patterns:
            if pattern not in self.patterns:
                self.patterns.append(pattern)
def is_ignored(self, file_path: Path) -> bool:
"""
判断文件是否被忽略
Args:
file_path: 文件路径(绝对路径)
Returns:
是否被忽略
"""
relative_path = file_path.relative_to(self.base_dir)
for pattern in self.patterns:
# 匹配文件名
if fnmatch.fnmatch(file_path.name, pattern):
return True
# 匹配相对路径
if fnmatch.fnmatch(str(relative_path), pattern):
return True
# 匹配目录
if pattern.endswith('/') and fnmatch.fnmatch(str(relative_path.parent), pattern.rstrip('/')):
return True
# 递归匹配子目录
if pattern.startswith('*/'):
parts = str(relative_path).split(os.sep)
for i, part in enumerate(parts):
if fnmatch.fnmatch(part, pattern[2:]):
return True
return False
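# --- Usage sketch (illustrative, not part of this commit) --------------------
# '*.log' is one of the defaults appended in load_patterns(); the base path
# below is hypothetical:
#
#     ignore = IgnorePattern(Path('/data/lobster'))
#     ignore.is_ignored(Path('/data/lobster/debug.log'))       # True via '*.log'
#     ignore.is_ignored(Path('/data/lobster/notes/today.md'))  # False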
class FileScanner:
"""文件扫描器(支持 .lobsterignore 和分块读取)"""
def __init__(self):
self.base_dir = Path(settings.LOBSTER_MEMORY_BASE)
self.supported_extensions = settings.SUPPORTED_EXTENSIONS
self.ignore = IgnorePattern(self.base_dir)
        self.chunk_size = 8192  # 8 KB read chunks
def scan_directory(self, lobster_id: str = None) -> List[Dict]:
"""
扫描目录,返回所有文件信息
Args:
lobster_id: 龙虾ID可选
Returns:
文件信息列表
"""
if not self.base_dir.exists():
return []
files = []
for file_path in self.base_dir.rglob('*'):
if not file_path.is_file():
continue
            # Check the file extension
if file_path.suffix not in self.supported_extensions:
continue
            # Skip files matched by .lobsterignore
if self.ignore.is_ignored(file_path):
continue
try:
relative_path = file_path.relative_to(self.base_dir)
                # Hash via a streaming read to avoid loading large files into memory
file_hash = self.compute_hash_stream(file_path)
files.append({
'file_path': str(relative_path),
'full_path': str(file_path),
'hash': file_hash,
'size': file_path.stat().st_size,
'lobster_id': lobster_id or 'unknown',
})
except Exception as e:
print(f"Error reading {file_path}: {e}")
return files
def get_file_content(self, file_path: str, chunked: bool = False) -> Tuple[str, str]:
"""
获取文件内容和哈希
Args:
file_path: 相对路径
chunked: 是否使用分块读取
Returns:
(content, hash)
"""
full_path = self.base_dir / file_path
if not full_path.exists():
raise FileNotFoundError(f"File not found: {file_path}")
        # Use chunked reading for large files (>50 MB)
file_size = full_path.stat().st_size
if chunked and file_size > 50 * 1024 * 1024:
content = self.read_file_chunked(full_path)
else:
content = full_path.read_text(encoding='utf-8', errors='ignore')
file_hash = self.compute_hash(content)
return content, file_hash
def read_file_chunked(self, file_path: Path) -> str:
"""
分块读取文件
Args:
file_path: 文件路径
Returns:
文件内容
"""
content_parts = []
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
while True:
chunk = f.read(self.chunk_size)
if not chunk:
break
content_parts.append(chunk)
return ''.join(content_parts)
def read_file_stream(self, file_path: str) -> Iterator[str]:
"""
流式读取文件(用于大文件传输)
Args:
file_path: 相对路径
Yields:
文件块
"""
full_path = self.base_dir / file_path
if not full_path.exists():
raise FileNotFoundError(f"File not found: {file_path}")
with open(full_path, 'r', encoding='utf-8', errors='ignore') as f:
while True:
chunk = f.read(self.chunk_size)
if not chunk:
break
yield chunk
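    # --- Streaming sketch (illustrative, not part of this commit) ------------
    # read_file_stream() pairs naturally with Django's StreamingHttpResponse;
    # a download view built on it might look like (names are assumptions):
    #
    #     from django.http import StreamingHttpResponse
    #
    #     def stream_file(request):
    #         scanner = FileScanner()
    #         return StreamingHttpResponse(
    #             scanner.read_file_stream(request.GET['file_path']),
    #             content_type='text/plain; charset=utf-8',
    #         )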
def write_file(self, file_path: str, content: str):
"""
写入文件
Args:
file_path: 相对路径
content: 文件内容
"""
full_path = self.base_dir / file_path
        # Ensure the parent directory exists
full_path.parent.mkdir(parents=True, exist_ok=True)
        # Write the file
full_path.write_text(content, encoding='utf-8')
def compute_hash(self, content: str) -> str:
"""
计算SHA256哈希
Args:
content: 文件内容
Returns:
哈希值
"""
return hashlib.sha256(content.encode('utf-8')).hexdigest()
def compute_hash_stream(self, file_path: Path) -> str:
"""
流式计算文件哈希(避免大文件内存问题)
Args:
file_path: 文件路径
Returns:
哈希值
"""
hash_obj = hashlib.sha256()
with open(file_path, 'rb') as f:
while True:
chunk = f.read(self.chunk_size)
if not chunk:
break
hash_obj.update(chunk)
return hash_obj.hexdigest()
def get_file_tree(self, lobster_id: str = None) -> Dict:
"""
获取文件树结构
Args:
lobster_id: 龙虾ID
Returns:
文件树字典
"""
files = self.scan_directory(lobster_id)
tree = {}
for file_info in files:
parts = Path(file_info['file_path']).parts
current = tree
for part in parts[:-1]:
if part not in current:
current[part] = {}
current = current[part]
filename = parts[-1]
current[filename] = file_info
return tree
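# --- Example get_file_tree() output (illustrative) ---------------------------
# A base directory containing README.md and notes/today.md yields roughly:
#
#     {
#         'README.md': {'file_path': 'README.md', 'hash': '9f2c...', 'size': 120, ...},
#         'notes': {
#             'today.md': {'file_path': 'notes/today.md', ...},
#         },
#     }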
class DiffChecker:
"""差异检查器(支持大文件优化)"""
def __init__(self):
self.scanner = FileScanner()
def check_sync_status(self, local_files: List[Dict], db_files: List[Dict]) -> Dict:
"""
检查同步状态
Args:
local_files: 本地文件列表
db_files: 数据库文件列表
Returns:
同步状态字典
"""
local_map = {f['file_path']: f for f in local_files}
db_map = {f['file_path']: f for f in db_files}
results = {
'consistent': [],
'local_newer': [],
'db_newer': [],
'conflict': [],
'local_only': [],
'db_only': [],
}
all_paths = set(local_map.keys()) | set(db_map.keys())
for path in all_paths:
local = local_map.get(path)
db = db_map.get(path)
if local and db:
                # Present on both sides
if local['hash'] == db['hash']:
results['consistent'].append({
'file_path': path,
'status': 'consistent'
})
            else:
                # Hashes differ; without a reliable local timestamp the direction
                # cannot be determined, so flag the pair as a conflict.
                results['conflict'].append({
                    'file_path': path,
                    'status': 'conflict',
                    'local_hash': local['hash'],
                    'db_hash': db['hash']
                })
elif local and not db:
                # Local only
results['local_only'].append({
'file_path': path,
'status': 'local_only'
})
elif not local and db:
                # Database only
results['db_only'].append({
'file_path': path,
'status': 'db_only'
})
return results
def get_file_diff(self, local_content: str, db_content: str, max_lines: int = 1000) -> Dict:
"""
获取文件差异(支持大文件限制)
Args:
local_content: 本地内容
db_content: 数据库内容
max_lines: 最大显示行数(防止大文件差异过大)
Returns:
差异信息
"""
local_lines = local_content.split('\n')
db_lines = db_content.split('\n')
# 限制行数(大文件只显示头尾)
if len(local_lines) > max_lines:
local_head = local_lines[:max_lines//2]
local_tail = local_lines[-max_lines//2:]
local_lines = local_head + ['... (中间省略 {}) 行 ...'.format(len(local_lines) - max_lines)] + local_tail
if len(db_lines) > max_lines:
db_head = db_lines[:max_lines//2]
db_tail = db_lines[-max_lines//2:]
db_lines = db_head + ['... (中间省略 {}) 行 ...'.format(len(db_lines) - max_lines)] + db_tail
return {
'local_lines': local_lines,
'db_lines': db_lines,
'has_diff': local_content != db_content,
'is_truncated': len(local_lines) > max_lines or len(db_lines) > max_lines
}
class AuditLogger:
"""操作日志记录器"""
def __init__(self):
self.model = None
# 延迟导入模型(避免循环导入)
from .models import SyncHistory
self.model = SyncHistory
def log_sync_action(
self,
lobster_id: str,
file_path: str,
action: str,
old_version: int = None,
new_version: int = None,
old_hash: str = None,
new_hash: str = None,
file_size: int = 0,
operator: str = 'system',
status: str = 'success',
error_message: str = None,
execution_time: float = 0
):
"""
记录同步操作
Args:
lobster_id: 龙虾ID
file_path: 文件路径
action: 操作类型
old_version: 操作前版本
new_version: 操作后版本
old_hash: 操作前哈希
new_hash: 操作后哈希
file_size: 文件大小
operator: 操作者
status: 操作状态
error_message: 错误信息
execution_time: 执行时间
"""
        self.model.objects.create(
            lobster_id=lobster_id,
            file_path=file_path,
            action=action,
            old_version=old_version,
            new_version=new_version,
            old_hash=old_hash,
            new_hash=new_hash,
            file_size=file_size,
            operator=operator,
            status=status,
            error_message=error_message,
            execution_time=execution_time,
            # created_at is filled automatically by auto_now_add
        )
def get_history(
self,
lobster_id: str = None,
file_path: str = None,
action: str = None,
limit: int = 100
) -> List[Dict]:
"""
获取操作历史
Args:
lobster_id: 龙虾ID可选
file_path: 文件路径(可选)
action: 操作类型(可选)
limit: 返回数量限制
Returns:
操作历史列表
"""
queryset = self.model.objects.all()
if lobster_id:
queryset = queryset.filter(lobster_id=lobster_id)
if file_path:
queryset = queryset.filter(file_path=file_path)
if action:
queryset = queryset.filter(action=action)
records = queryset.order_by('-created_at')[:limit]
return [
{
'id': r.id,
'lobster_id': r.lobster_id,
'file_path': r.file_path,
'action': r.action,
'status': r.status,
'old_version': r.old_version,
'new_version': r.new_version,
'old_hash': r.old_hash,
'new_hash': r.new_hash,
'file_size': r.file_size,
'operator': r.operator,
'error_message': r.error_message,
'execution_time': r.execution_time,
'created_at': r.created_at.isoformat(),
}
for r in records
]
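
AuditLogger is consumed by the sync views below; a minimal query sketch (the lobster_id value and the status filter are illustrative):

# Hypothetical usage, not part of this commit
logger = AuditLogger()
recent = logger.get_history(lobster_id='daotong', limit=20)
failures = [r for r in recent if r['status'] == 'failed']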

backend/memory_app/urls.py

@@ -0,0 +1,31 @@
from django.urls import path
from . import views
urlpatterns = [
    # Scanning
    path('scan/', views.scan_files, name='scan_files'),
    path('tree/', views.get_file_tree, name='get_file_tree'),
    # Sync status
    path('status/', views.check_sync_status, name='check_sync_status'),
    # Diff comparison
    path('diff/', views.get_file_diff, name='get_file_diff'),
    # Sync operations
    path('sync/db/', views.sync_to_db, name='sync_to_db'),
    path('sync/local/', views.sync_to_local, name='sync_to_local'),
    # Version history
    path('versions/', views.get_versions, name='get_versions'),
    # Operation history
    path('history/', views.get_history, name='get_history'),
    # Statistics
    path('stats/', views.get_stats, name='get_stats'),
    # .lobsterignore management
    path('ignore/patterns/', views.get_ignore_patterns, name='get_ignore_patterns'),
    path('ignore/reload/', views.reload_ignore_patterns, name='reload_ignore_patterns'),
]

backend/memory_app/views.py

@@ -0,0 +1,448 @@
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from .models import LobsterMemory
from .serializers import LobsterMemorySerializer
from .services import FileScanner, DiffChecker, AuditLogger
import time
@api_view(['GET'])
def scan_files(request):
"""
扫描本地文件
"""
lobster_id = request.query_params.get('lobster_id', 'daotong')
scanner = FileScanner()
files = scanner.scan_directory(lobster_id)
return Response({
'success': True,
'data': files,
'total': len(files)
})
@api_view(['GET'])
def get_file_tree(request):
"""
获取文件树
"""
lobster_id = request.query_params.get('lobster_id', 'daotong')
scanner = FileScanner()
tree = scanner.get_file_tree(lobster_id)
return Response({
'success': True,
'data': tree
})
@api_view(['GET'])
def check_sync_status(request):
"""
检查同步状态
"""
lobster_id = request.query_params.get('lobster_id', 'daotong')
# 获取本地文件
scanner = FileScanner()
local_files = scanner.scan_directory(lobster_id)
# 获取数据库文件
db_files = list(LobsterMemory.objects.filter(
lobster_id=lobster_id
).values('file_path', 'hash', 'version', 'updated_at'))
# 检查同步状态
checker = DiffChecker()
sync_status = checker.check_sync_status(local_files, db_files)
return Response({
'success': True,
'data': sync_status
})
@api_view(['GET'])
def get_file_diff(request):
"""
获取文件差异(支持大文件优化)
"""
file_path = request.query_params.get('file_path')
lobster_id = request.query_params.get('lobster_id', 'daotong')
chunked = request.query_params.get('chunked', 'false').lower() == 'true'
if not file_path:
return Response({
'success': False,
'error': 'file_path is required'
}, status=status.HTTP_400_BAD_REQUEST)
scanner = FileScanner()
    # Local content (optionally read in chunks)
try:
local_content, local_hash = scanner.get_file_content(file_path, chunked=chunked)
except FileNotFoundError:
local_content = None
local_hash = None
    # Database content
try:
db_record = LobsterMemory.objects.filter(
lobster_id=lobster_id,
file_path=file_path
).order_by('-version').first()
if db_record:
db_content = db_record.content
db_hash = db_record.hash
else:
db_content = None
db_hash = None
except Exception as e:
return Response({
'success': False,
'error': str(e)
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    # Compute the diff (with large-file limits)
checker = DiffChecker()
if local_content and db_content:
diff = checker.get_file_diff(local_content, db_content)
else:
diff = {
'local_lines': local_content.split('\n') if local_content else [],
'db_lines': db_content.split('\n') if db_content else [],
'has_diff': local_content != db_content,
'is_truncated': False
}
    # Determine the sync status
if local_hash == db_hash:
sync_status = 'consistent'
elif local_hash and not db_hash:
sync_status = 'local_newer'
elif not local_hash and db_hash:
sync_status = 'db_newer'
else:
sync_status = 'conflict'
return Response({
'success': True,
'data': {
'file_path': file_path,
'lobster_id': lobster_id,
'local_content': local_content,
'db_content': db_content,
'local_hash': local_hash,
'db_hash': db_hash,
'status': sync_status,
'diff': diff
}
})
@api_view(['POST'])
def sync_to_db(request):
"""
同步到数据库(带操作日志)
"""
lobster_id = request.data.get('lobster_id', 'daotong')
file_path = request.data.get('file_path')
operator = request.data.get('operator', 'system')
if not file_path:
return Response({
'success': False,
'error': 'file_path is required'
}, status=status.HTTP_400_BAD_REQUEST)
scanner = FileScanner()
audit_logger = AuditLogger()
start_time = time.time()
try:
        # Read the local file
content, file_hash = scanner.get_file_content(file_path)
        # Look up the existing record
existing = LobsterMemory.objects.filter(
lobster_id=lobster_id,
file_path=file_path
).order_by('-version').first()
old_version = existing.version if existing else None
old_hash = existing.hash if existing else None
if existing:
            # Bump the version
new_version = existing.version + 1
else:
new_version = 1
        # Create a new version row
record = LobsterMemory.objects.create(
lobster_id=lobster_id,
file_path=file_path,
content=content,
hash=file_hash,
status='consistent',
version=new_version,
)
execution_time = time.time() - start_time
        # Write the audit log
audit_logger.log_sync_action(
lobster_id=lobster_id,
file_path=file_path,
action='sync_to_db',
old_version=old_version,
new_version=new_version,
old_hash=old_hash,
new_hash=file_hash,
file_size=record.size,
operator=operator,
status='success',
execution_time=execution_time
)
return Response({
'success': True,
            'message': 'Synced to database',
'data': LobsterMemorySerializer(record).data
})
except Exception as e:
execution_time = time.time() - start_time
        # Log the failure
audit_logger.log_sync_action(
lobster_id=lobster_id,
file_path=file_path,
action='sync_to_db',
operator=operator,
status='failed',
error_message=str(e),
execution_time=execution_time
)
return Response({
'success': False,
'error': str(e)
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
def sync_to_local(request):
"""
同步到本地(带操作日志)
"""
lobster_id = request.data.get('lobster_id', 'daotong')
file_path = request.data.get('file_path')
operator = request.data.get('operator', 'system')
if not file_path:
return Response({
'success': False,
'error': 'file_path is required'
}, status=status.HTTP_400_BAD_REQUEST)
scanner = FileScanner()
audit_logger = AuditLogger()
start_time = time.time()
try:
        # Fetch the latest version from the database
db_record = LobsterMemory.objects.filter(
lobster_id=lobster_id,
file_path=file_path
).order_by('-version').first()
if not db_record:
return Response({
'success': False,
'error': 'File not found in database'
}, status=status.HTTP_404_NOT_FOUND)
        # Capture the current local hash, if the file exists
try:
local_content, local_hash = scanner.get_file_content(file_path)
except FileNotFoundError:
local_hash = None
        # Write the local file
scanner.write_file(file_path, db_record.content)
execution_time = time.time() - start_time
        # Write the audit log
audit_logger.log_sync_action(
lobster_id=lobster_id,
file_path=file_path,
action='sync_to_local',
old_version=None,
new_version=db_record.version,
old_hash=local_hash,
new_hash=db_record.hash,
file_size=db_record.size,
operator=operator,
status='success',
execution_time=execution_time
)
return Response({
'success': True,
            'message': 'Synced to local',
'data': LobsterMemorySerializer(db_record).data
})
except Exception as e:
execution_time = time.time() - start_time
        # Log the failure
audit_logger.log_sync_action(
lobster_id=lobster_id,
file_path=file_path,
action='sync_to_local',
operator=operator,
status='failed',
error_message=str(e),
execution_time=execution_time
)
return Response({
'success': False,
'error': str(e)
}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['GET'])
def get_versions(request):
"""
获取文件的所有版本
"""
file_path = request.query_params.get('file_path')
lobster_id = request.query_params.get('lobster_id', 'daotong')
if not file_path:
return Response({
'success': False,
'error': 'file_path is required'
}, status=status.HTTP_400_BAD_REQUEST)
versions = LobsterMemory.objects.filter(
lobster_id=lobster_id,
file_path=file_path
).order_by('-version')
return Response({
'success': True,
'data': LobsterMemorySerializer(versions, many=True).data
})
@api_view(['GET'])
def get_stats(request):
"""
获取统计信息
"""
lobster_id = request.query_params.get('lobster_id', 'daotong')
total_files = LobsterMemory.objects.filter(lobster_id=lobster_id).count()
status_counts = {}
for status_choice, _ in LobsterMemory.STATUS_CHOICES:
count = LobsterMemory.objects.filter(
lobster_id=lobster_id,
status=status_choice
).count()
status_counts[status_choice] = count
    # Total size
from django.db.models import Sum
total_size = LobsterMemory.objects.filter(
lobster_id=lobster_id
).aggregate(total=Sum('size'))['total'] or 0
return Response({
'success': True,
'data': {
'total_files': total_files,
'status_counts': status_counts,
'total_size': total_size,
'total_size_mb': round(total_size / 1024 / 1024, 2)
}
})
@api_view(['GET'])
def get_history(request):
"""
获取操作历史
"""
lobster_id = request.query_params.get('lobster_id', 'daotong')
file_path = request.query_params.get('file_path')
action = request.query_params.get('action')
limit = int(request.query_params.get('limit', 100))
audit_logger = AuditLogger()
history = audit_logger.get_history(
lobster_id=lobster_id,
file_path=file_path,
action=action,
limit=limit
)
return Response({
'success': True,
'data': history,
'total': len(history)
})
@api_view(['GET'])
def get_ignore_patterns(request):
"""
获取 .lobsterignore 模式列表
"""
lobster_id = request.query_params.get('lobster_id', 'daotong')
scanner = FileScanner()
patterns = scanner.ignore.patterns
return Response({
'success': True,
'data': {
'patterns': patterns,
'total': len(patterns)
}
})
@api_view(['POST'])
def reload_ignore_patterns(request):
"""
重新加载 .lobsterignore 模式
"""
lobster_id = request.data.get('lobster_id', 'daotong')
scanner = FileScanner()
# 重新加载忽略规则
scanner.ignore.load_patterns()
return Response({
'success': True,
        'message': 'Ignore rules reloaded',
'data': {
'patterns': scanner.ignore.patterns,
'total': len(scanner.ignore.patterns)
}
})
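
Taken together with memory_app/urls.py, a typical round trip against this API might look like the following sketch. The base URL reflects the Dockerfile's port 8087, and 'daotong' is the default lobster_id used by the views; both are assumptions about the deployed environment.

# Hypothetical client-side sketch, not part of this commit
import requests

BASE = 'http://localhost:8087/api'

# 1. See what differs between disk and database
status = requests.get(f'{BASE}/status/', params={'lobster_id': 'daotong'}).json()

# 2. Push every local-only file into the database
for item in status['data']['local_only']:
    requests.post(f'{BASE}/sync/db/', json={
        'lobster_id': 'daotong',
        'file_path': item['file_path'],
        'operator': 'daotong',
    })

# 3. Inspect the audit trail
history = requests.get(f'{BASE}/history/', params={'lobster_id': 'daotong'}).json()
print(history['total'])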

backend/memory_sync/settings.py

@@ -0,0 +1,101 @@
"""
Django settings for memory_sync project.
"""
from pathlib import Path
import os
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure-dev-key-change-in-production'
DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'memory_app',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'memory_sync.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'memory_sync.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.getenv('DB_NAME', 'lobster_memory'),
'USER': os.getenv('DB_USER', 'postgres'),
'PASSWORD': os.getenv('DB_PASSWORD', 'postgres'),
'HOST': os.getenv('DB_HOST', 'localhost'),
'PORT': os.getenv('DB_PORT', '5432'),
}
}
AUTH_PASSWORD_VALIDATORS = []
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_TZ = True
STATIC_URL = 'static/'
STATIC_ROOT = BASE_DIR / 'staticfiles'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# REST Framework
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 100,
}
# CORS
CORS_ALLOW_ALL_ORIGINS = True
# Lobster memory base directory
LOBSTER_MEMORY_BASE = os.getenv('LOBSTER_MEMORY_BASE', '/home/node/.openclaw/workspace/daotong')
# Supported file extensions
SUPPORTED_EXTENSIONS = ['.md', '.txt', '.json', '.py', '.js', '.yaml', '.yml']

backend/memory_sync/urls.py

@@ -0,0 +1,7 @@
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('memory_app.urls')),
]

backend/memory_sync/wsgi.py

@@ -0,0 +1,11 @@
"""
WSGI config for memory_sync project.
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'memory_sync.settings')
application = get_wsgi_application()

backend/requirements.txt

@@ -0,0 +1,5 @@
Django>=4.2.0,<5.0.0
djangorestframework>=3.14.0
django-cors-headers>=4.0.0
psycopg2-binary>=2.9.0
python-dotenv>=1.0.0