Files
道童 7992ff0b89 feat: 更新 API 视图和序列化器
更新内容:
1. views.py
- 集成分块读取(所有文件操作强制使用 chunked=True)
- 集成语义摘要生成(SemanticSummaryGenerator)
- 记录变动行数(lines_changed)
- 记录数据源(source: local/database/manual)
- 完善 check_sync_status 支持 HARD_CONFLICT 状态
- get_file_diff 返回变动行数
- get_ignore_patterns 返回模式类型(glob/regex)

2. serializers.py
- 添加 status_display, source_display 字段
- 更新 LobsterMemorySerializer 包含 summary 字段
- 更新 SyncHistorySerializer 包含 lines_changed, source 字段

所有 API 接口已更新,支持完整功能。
2026-04-05 14:16:15 +00:00

531 lines
14 KiB
Python
Raw Permalink Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
"""
龙虾记忆同步系统 - API 视图模块
集成所有核心功能:
- 分块与流式处理
- .lobsterignore 支持
- 审计日志
- 语义摘要
- 完善的冲突判定
"""
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from .models import LobsterMemory
from .serializers import LobsterMemorySerializer, FileDiffSerializer
from .services import (
FileScanner, DiffChecker, AuditLogger, SemanticSummaryGenerator
)
import time
@api_view(['GET'])
def scan_files(request):
    """
    Scan the local file tree for the given lobster.

    Files matched by .lobsterignore rules are excluded, and hashes are
    computed with streaming reads so large files do not blow up memory.
    """
    lobster_id = request.query_params.get('lobster_id', 'daotong')
    scanned = FileScanner().scan_directory(lobster_id)
    payload = {
        'success': True,
        'data': scanned,
        'total': len(scanned),
    }
    return Response(payload)
@api_view(['GET'])
def get_file_tree(request):
    """
    Return the hierarchical file tree for a lobster.

    Only files that survive the .lobsterignore filter appear in the tree.
    """
    lobster_id = request.query_params.get('lobster_id', 'daotong')
    tree = FileScanner().get_file_tree(lobster_id)
    return Response({'success': True, 'data': tree})
@api_view(['GET'])
def check_sync_status(request):
    """
    Compare local files against database records and report sync state.

    Per-file statuses (determined by DiffChecker):
    - consistent:    content identical on both sides
    - local_newer:   only the local copy exists
    - db_newer:      only the database copy exists
    - conflict:      both sides exist but hashes differ
    - hard_conflict: severe conflict (version > 1, updated within 1 hour)
    - local_only:    local side only
    - db_only:       database side only
    """
    lobster_id = request.query_params.get('lobster_id', 'daotong')

    # Local side, with .lobsterignore rules applied.
    local_files = FileScanner().scan_directory(lobster_id)

    # Database side: metadata for every stored record of this lobster.
    db_files = list(
        LobsterMemory.objects
        .filter(lobster_id=lobster_id)
        .values('file_path', 'hash', 'version', 'updated_at')
    )

    # DiffChecker performs the full comparison, including HARD_CONFLICT.
    result = DiffChecker().check_sync_status(local_files, db_files)
    return Response({'success': True, 'data': result})
@api_view(['GET'])
def get_file_diff(request):
    """
    Return the diff between a local file and its latest database record.

    Reads the local file in chunks (controlled by the ``chunked`` query
    param, default true) and includes the number of changed lines.

    Query params:
        file_path:  required — path of the file to diff.
        lobster_id: optional — defaults to 'daotong'.
        chunked:    optional — 'true'/'false', defaults to 'true'.

    Returns 400 when file_path is missing, 500 on a database error.
    """
    file_path = request.query_params.get('file_path')
    lobster_id = request.query_params.get('lobster_id', 'daotong')
    chunked = request.query_params.get('chunked', 'true').lower() == 'true'
    if not file_path:
        return Response({
            'success': False,
            'error': 'file_path is required'
        }, status=status.HTTP_400_BAD_REQUEST)
    scanner = FileScanner()
    # Local side: chunked read; a missing file is represented as None.
    try:
        local_content, local_hash = scanner.get_file_content(file_path, chunked=chunked)
    except FileNotFoundError:
        local_content = None
        local_hash = None
    # Database side: latest version of the record, if any.
    try:
        db_record = LobsterMemory.objects.filter(
            lobster_id=lobster_id,
            file_path=file_path
        ).order_by('-version').first()
        if db_record:
            db_content = db_record.content
            db_hash = db_record.hash
        else:
            db_content = None
            db_hash = None
    except Exception as e:
        return Response({
            'success': False,
            'error': str(e)
        }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    # Compute the diff.  FIX: compare against None explicitly so that an
    # empty (zero-byte) file still goes through the real diff path; the
    # previous truthiness check treated empty content as "missing" and
    # reported lines_changed=0 for it.
    checker = DiffChecker()
    if local_content is not None and db_content is not None:
        diff = checker.get_file_diff(local_content, db_content)
    else:
        # One side is absent: emit a minimal diff structure directly.
        diff = {
            'local_lines': local_content.split('\n') if local_content else [],
            'db_lines': db_content.split('\n') if db_content else [],
            'has_diff': local_content != db_content,
            'is_truncated': False,
            'lines_changed': 0
        }
    # Derive a coarse sync status from the two hashes (hashes are either
    # hex strings or None, so truthiness is safe here).
    if local_hash == db_hash:
        sync_status = 'consistent'
    elif local_hash and not db_hash:
        sync_status = 'local_newer'
    elif not local_hash and db_hash:
        sync_status = 'db_newer'
    else:
        sync_status = 'conflict'
    return Response({
        'success': True,
        'data': {
            'file_path': file_path,
            'lobster_id': lobster_id,
            'local_content': local_content,
            'db_content': db_content,
            'local_hash': local_hash,
            'db_hash': db_hash,
            'status': sync_status,
            'diff': diff
        }
    })
@api_view(['POST'])
def sync_to_db(request):
    """
    Sync a local file into the database as a new version, with audit logging.

    - Reads the local file with chunked I/O.
    - Generates a semantic summary for the new record.
    - Records the number of lines changed versus the previous version.
    - Logs source, operator and execution time on success and failure.

    Body params:
        lobster_id: optional — defaults to 'daotong'.
        file_path:  required.
        operator:   optional — defaults to 'system'.

    Returns 400 when file_path is missing, 500 on any sync failure.
    """
    lobster_id = request.data.get('lobster_id', 'daotong')
    file_path = request.data.get('file_path')
    operator = request.data.get('operator', 'system')
    if not file_path:
        return Response({
            'success': False,
            'error': 'file_path is required'
        }, status=status.HTTP_400_BAD_REQUEST)
    scanner = FileScanner()
    audit_logger = AuditLogger()
    summary_generator = SemanticSummaryGenerator()
    start_time = time.time()
    try:
        # Read the local file using chunked I/O.
        content, file_hash = scanner.get_file_content(file_path, chunked=True)
        # Latest existing record for this file, if any.
        existing = LobsterMemory.objects.filter(
            lobster_id=lobster_id,
            file_path=file_path
        ).order_by('-version').first()
        old_version = existing.version if existing else None
        old_hash = existing.hash if existing else None
        old_content = existing.content if existing else None
        # Count changed lines against the previous content.  FIX: compare
        # with None explicitly — an empty previous version is still real
        # content and its diff must be counted (truthiness skipped it).
        lines_changed = 0
        if old_content is not None:
            checker = DiffChecker()
            lines_changed = checker.calculate_lines_changed(old_content, content)
        # Versions are append-only: bump instead of overwriting.
        new_version = existing.version + 1 if existing else 1
        # Semantic summary for the new content.
        summary = summary_generator.generate_summary(content)
        record = LobsterMemory.objects.create(
            lobster_id=lobster_id,
            file_path=file_path,
            content=content,
            hash=file_hash,
            status='consistent',
            version=new_version,
            summary=summary,
        )
        execution_time = time.time() - start_time
        # Success audit entry with line-change count and data source.
        # NOTE(review): record.size is not set in create() above — presumably
        # computed in the model's save(); confirm against the model.
        audit_logger.log_sync_action(
            lobster_id=lobster_id,
            file_path=file_path,
            action='sync_to_db',
            old_version=old_version,
            new_version=new_version,
            old_hash=old_hash,
            new_hash=file_hash,
            file_size=record.size,
            lines_changed=lines_changed,
            source='local',
            operator=operator,
            status='success',
            execution_time=execution_time
        )
        return Response({
            'success': True,
            'message': '已同步到数据库',
            'data': LobsterMemorySerializer(record).data
        })
    except Exception as e:
        execution_time = time.time() - start_time
        # Failure audit entry; the error is still surfaced to the caller.
        audit_logger.log_sync_action(
            lobster_id=lobster_id,
            file_path=file_path,
            action='sync_to_db',
            source='local',
            operator=operator,
            status='failed',
            error_message=str(e),
            execution_time=execution_time
        )
        return Response({
            'success': False,
            'error': str(e)
        }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
def sync_to_local(request):
    """
    Write the latest database version of a file back to the local tree.

    - Records the number of lines changed versus the local copy.
    - Logs source, operator and execution time on success and failure.

    Body params:
        lobster_id: optional — defaults to 'daotong'.
        file_path:  required.
        operator:   optional — defaults to 'system'.

    Returns 400 when file_path is missing, 404 when the file has no
    database record, 500 on any other failure.
    """
    lobster_id = request.data.get('lobster_id', 'daotong')
    file_path = request.data.get('file_path')
    operator = request.data.get('operator', 'system')
    if not file_path:
        return Response({
            'success': False,
            'error': 'file_path is required'
        }, status=status.HTTP_400_BAD_REQUEST)
    scanner = FileScanner()
    audit_logger = AuditLogger()
    start_time = time.time()
    try:
        # Latest stored version of the file.
        db_record = LobsterMemory.objects.filter(
            lobster_id=lobster_id,
            file_path=file_path
        ).order_by('-version').first()
        if not db_record:
            return Response({
                'success': False,
                'error': 'File not found in database'
            }, status=status.HTTP_404_NOT_FOUND)
        # Read the current local copy (chunked), if one exists.
        try:
            local_content, local_hash = scanner.get_file_content(file_path, chunked=True)
        except FileNotFoundError:
            local_content = None
            local_hash = None
        # Count changed lines against the local copy.  FIX: compare with
        # None explicitly — an empty local file is still real content and
        # its diff must be counted (truthiness skipped it).
        lines_changed = 0
        if local_content is not None:
            checker = DiffChecker()
            lines_changed = checker.calculate_lines_changed(local_content, db_record.content)
        # Overwrite the local file with the database content.
        scanner.write_file(file_path, db_record.content)
        execution_time = time.time() - start_time
        # Success audit entry with line-change count and data source.
        audit_logger.log_sync_action(
            lobster_id=lobster_id,
            file_path=file_path,
            action='sync_to_local',
            old_version=None,
            new_version=db_record.version,
            old_hash=local_hash,
            new_hash=db_record.hash,
            file_size=db_record.size,
            lines_changed=lines_changed,
            source='database',
            operator=operator,
            status='success',
            execution_time=execution_time
        )
        return Response({
            'success': True,
            'message': '已同步到本地',
            'data': LobsterMemorySerializer(db_record).data
        })
    except Exception as e:
        execution_time = time.time() - start_time
        # Failure audit entry; the error is still surfaced to the caller.
        audit_logger.log_sync_action(
            lobster_id=lobster_id,
            file_path=file_path,
            action='sync_to_local',
            source='database',
            operator=operator,
            status='failed',
            error_message=str(e),
            execution_time=execution_time
        )
        return Response({
            'success': False,
            'error': str(e)
        }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['GET'])
def get_versions(request):
    """
    List every stored version of a file, newest first (summaries included).

    Query params:
        file_path:  required.
        lobster_id: optional — defaults to 'daotong'.
    """
    file_path = request.query_params.get('file_path')
    lobster_id = request.query_params.get('lobster_id', 'daotong')
    if not file_path:
        return Response(
            {'success': False, 'error': 'file_path is required'},
            status=status.HTTP_400_BAD_REQUEST,
        )
    queryset = LobsterMemory.objects.filter(
        lobster_id=lobster_id,
        file_path=file_path,
    ).order_by('-version')
    serialized = LobsterMemorySerializer(queryset, many=True).data
    return Response({'success': True, 'data': serialized})
@api_view(['GET'])
def get_stats(request):
    """
    Return aggregate statistics for a lobster's stored files.

    Includes total file count, a per-status breakdown (every status in
    LobsterMemory.STATUS_CHOICES appears — including hard_conflict —
    even when its count is zero) and the total stored size.
    """
    from django.db.models import Count, Sum
    lobster_id = request.query_params.get('lobster_id', 'daotong')
    records = LobsterMemory.objects.filter(lobster_id=lobster_id)
    total_files = records.count()
    # PERF: one grouped query instead of one COUNT query per status choice.
    status_counts = {choice: 0 for choice, _ in LobsterMemory.STATUS_CHOICES}
    for row in records.values('status').annotate(n=Count('status')):
        # Only statuses declared in STATUS_CHOICES are reported, so the
        # response keys match the original per-choice queries exactly.
        if row['status'] in status_counts:
            status_counts[row['status']] = row['n']
    total_size = records.aggregate(total=Sum('size'))['total'] or 0
    return Response({
        'success': True,
        'data': {
            'total_files': total_files,
            'status_counts': status_counts,
            'total_size': total_size,
            'total_size_mb': round(total_size / 1024 / 1024, 2)
        }
    })
@api_view(['GET'])
def get_history(request):
    """
    Return the audit history of sync operations (includes lines changed
    and data source for each entry).

    Query params:
        lobster_id: optional — defaults to 'daotong'.
        file_path:  optional filter.
        action:     optional filter.
        limit:      optional max entries, defaults to 100.

    Returns 400 when limit is not an integer.
    """
    lobster_id = request.query_params.get('lobster_id', 'daotong')
    file_path = request.query_params.get('file_path')
    action = request.query_params.get('action')
    # FIX: validate the limit instead of letting int() raise a ValueError
    # that surfaces as a 500 Internal Server Error.
    try:
        limit = int(request.query_params.get('limit', 100))
    except (TypeError, ValueError):
        return Response({
            'success': False,
            'error': 'limit must be an integer'
        }, status=status.HTTP_400_BAD_REQUEST)
    audit_logger = AuditLogger()
    history = audit_logger.get_history(
        lobster_id=lobster_id,
        file_path=file_path,
        action=action,
        limit=limit
    )
    return Response({
        'success': True,
        'data': history,
        'total': len(history)
    })
@api_view(['GET'])
def get_ignore_patterns(request):
    """
    List all active .lobsterignore patterns.

    Each entry carries its type so callers can tell glob patterns
    (e.g. ``*.pyc``) apart from regex patterns (e.g. ``re:.*\\.log$``),
    default rules included.
    """
    # NOTE(review): lobster_id is read but never used here — confirm intent.
    lobster_id = request.query_params.get('lobster_id', 'daotong')
    scanner = FileScanner()
    entries = [
        {'type': kind, 'pattern': raw}
        for kind, raw, _ in scanner.ignore.patterns
    ]
    return Response({
        'success': True,
        'data': {
            'patterns': entries,
            'total': len(entries),
        },
    })
@api_view(['POST'])
def reload_ignore_patterns(request):
    """
    Re-read the .lobsterignore file and return the refreshed pattern list.

    Call this endpoint after editing .lobsterignore so the changes take
    effect without restarting the service.
    """
    # NOTE(review): lobster_id is read but never used here — confirm intent.
    lobster_id = request.data.get('lobster_id', 'daotong')
    scanner = FileScanner()
    scanner.ignore.load_patterns()
    entries = [
        {'type': kind, 'pattern': raw}
        for kind, raw, _ in scanner.ignore.patterns
    ]
    return Response({
        'success': True,
        'message': '已重新加载忽略规则',
        'data': {
            'patterns': entries,
            'total': len(entries),
        },
    })