diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..35410ca
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/.idea/dataSources.xml b/.idea/dataSources.xml
new file mode 100644
index 0000000..ffbe15c
--- /dev/null
+++ b/.idea/dataSources.xml
@@ -0,0 +1,20 @@
+
+
+
+
+ sqlite.xerial
+ true
+ org.sqlite.JDBC
+ jdbc:sqlite:E:\PythonWorkSpace\fastcopy\src\db_ntfs_info.db
+ $ProjectFileDir$
+
+
+ file://$APPLICATION_CONFIG_DIR$/jdbc-drivers/Xerial SQLiteJDBC/3.45.1/org/xerial/sqlite-jdbc/3.45.1.0/sqlite-jdbc-3.45.1.0.jar
+
+
+ file://$APPLICATION_CONFIG_DIR$/jdbc-drivers/Xerial SQLiteJDBC/3.45.1/org/slf4j/slf4j-api/1.7.36/slf4j-api-1.7.36.jar
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/fastcopy.iml b/.idea/fastcopy.iml
new file mode 100644
index 0000000..2c80e12
--- /dev/null
+++ b/.idea/fastcopy.iml
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml
new file mode 100644
index 0000000..16a6914
--- /dev/null
+++ b/.idea/inspectionProfiles/Project_Default.xml
@@ -0,0 +1,24 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000..105ce2d
--- /dev/null
+++ b/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..d919c89
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 0000000..cc2fe28
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/sqldialects.xml b/.idea/sqldialects.xml
new file mode 100644
index 0000000..c0e01ca
--- /dev/null
+++ b/.idea/sqldialects.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 0000000..35eb1dd
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.python-version b/.python-version
new file mode 100644
index 0000000..e4fba21
--- /dev/null
+++ b/.python-version
@@ -0,0 +1 @@
+3.12
diff --git a/db_manage/__init__.py b/db_manage/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/db_manage/create_tables.py b/db_manage/create_tables.py
new file mode 100644
index 0000000..a2e5fe9
--- /dev/null
+++ b/db_manage/create_tables.py
@@ -0,0 +1,378 @@
+import os
+import sqlite3
+
+
+def CreateDBConfigTable(db_path='../src/db_ntfs_info.db', table_name='db_config'):
+    """
+    Create the SQLite database at the given path (if needed) and create the config table in it.
+
+    :param db_path: str, path to the database file
+    :param table_name: str, name of the table to create
+    :return: None
+    """
+    # Make sure the target directory exists
+    directory = os.path.dirname(db_path)
+    if directory and not os.path.exists(directory):
+        os.makedirs(directory)
+
+    # Connect to the SQLite database (the file is created automatically if it does not exist)
+    conn = sqlite3.connect(db_path)
+
+    # Create a cursor object
+    cursor = conn.cursor()
+
+    # Build the CREATE TABLE statement dynamically
+    create_table_sql = f"""
+    CREATE TABLE IF NOT EXISTS {table_name} (
+        ID INTEGER PRIMARY KEY AUTOINCREMENT,
+        Key TEXT UNIQUE NOT NULL,
+        Value TEXT NOT NULL
+    );
+    """
+
+    # Execute the SQL statement
+    cursor.execute(create_table_sql)
+
+    # Commit the changes
+    conn.commit()
+
+    # Close the connection
+    conn.close()
+
+    print(f"Table [{table_name}] created in database [{db_path}]")
+
+
+def CreateDBDeviceTable(db_path='../src/db_ntfs_info.db', table_name='db_device'):
+    """
+    Create the SQLite database at the given path (if needed) and create the device info table in it.
+
+    :param db_path: str, path to the database file
+    :param table_name: str, name of the table to create
+    :return: None
+    """
+
+    # Make sure the target directory exists
+    directory = os.path.dirname(db_path)
+    if directory and not os.path.exists(directory):
+        os.makedirs(directory)
+
+    # Connect to the SQLite database (the file is created automatically if it does not exist)
+    conn = sqlite3.connect(db_path)
+
+    # Create a cursor object
+    cursor = conn.cursor()
+
+    # Build the CREATE TABLE statement dynamically
+    create_table_sql = f"""
+    CREATE TABLE IF NOT EXISTS {table_name} (
+        ID INTEGER PRIMARY KEY AUTOINCREMENT,
+        Path TEXT NOT NULL UNIQUE,  -- UNIQUE constraint prevents duplicate devices
+        Type TEXT NOT NULL CHECK(Type IN ('磁盘', '文件', '文件夹')),  -- disk / file / folder
+        Option TEXT
+    );
+    """
+
+    # Execute the SQL statement
+    cursor.execute(create_table_sql)
+
+    # Commit the changes
+    conn.commit()
+
+    # Close the connection
+    conn.close()
+
+    print(f"Table [{table_name}] created in database [{db_path}]")
+
+
+def CreateDBNodeTable(db_path='../src/db_ntfs_info.db', table_name='db_node'):
+    """
+    Create the SQLite database at the given path (if needed) and create the node info table in it.
+
+    :param db_path: str, path to the database file
+    :param table_name: str, name of the table to create
+    :return: None
+    """
+
+    # Make sure the target directory exists
+    directory = os.path.dirname(db_path)
+    if directory and not os.path.exists(directory):
+        os.makedirs(directory)
+
+    # Connect to the SQLite database (the file is created automatically if it does not exist)
+    conn = sqlite3.connect(db_path)
+
+    # Create a cursor object
+    cursor = conn.cursor()
+
+    # Build the CREATE TABLE statement dynamically
+ create_table_sql = f"""
+ CREATE TABLE IF NOT EXISTS {table_name} (
+ ID INTEGER PRIMARY KEY AUTOINCREMENT,
+ PathID INTEGER,
+ ParentID INTEGER,
+ NameHash TEXT,
+ PathHash TEXT,
+ ExtendNameID INTEGER,
+ DirLayer INTEGER,
+ GroupID INTEGER,
+ UserID INTEGER,
+ FileCreateTime TEXT,
+ FileModifyTime TEXT,
+ FileAccessTime TEXT,
+ FileAuthTime TEXT,
+ FileSize INTEGER,
+ FileMode INTEGER,
+ FileHash TEXT,
+ ExtentCount INTEGER,
+ extent1_DeviceID INTEGER,
+ extent1_Location INTEGER,
+ extent1_Length INTEGER,
+ extent2_DeviceID INTEGER,
+ extent2_Location INTEGER,
+ extent2_Length INTEGER,
+ extent3_DeviceID INTEGER,
+ extent3_Location INTEGER,
+ extent3_Length INTEGER,
+ extent4_DeviceID INTEGER,
+ extent4_Location INTEGER,
+ extent4_Length INTEGER,
+
+        -- Foreign key constraints (optional)
+ FOREIGN KEY(PathID) REFERENCES path_table(ID),
+ FOREIGN KEY(ExtendNameID) REFERENCES extname_table(ID),
+ FOREIGN KEY(GroupID) REFERENCES groups(ID),
+ FOREIGN KEY(UserID) REFERENCES users(ID)
+ );
+ """
+
+    # Execute the SQL statement
+    cursor.execute(create_table_sql)
+
+    # Commit the changes
+    conn.commit()
+
+    # Close the connection
+    conn.close()
+
+    print(f"Table [{table_name}] created in database [{db_path}]")
+
+
+def CreateDBUserTable(db_path='../src/db_ntfs_info.db', table_name='db_user'):
+    """
+    Create the SQLite database at the given path (if needed) and create the user table in it.
+
+    :param db_path: str, path to the database file
+    :param table_name: str, name of the table to create
+    :return: None
+    """
+
+    # Make sure the target directory exists
+    directory = os.path.dirname(db_path)
+    if directory and not os.path.exists(directory):
+        os.makedirs(directory)
+
+    # Connect to the SQLite database (the file is created automatically if it does not exist)
+    conn = sqlite3.connect(db_path)
+
+    # Create a cursor object
+    cursor = conn.cursor()
+
+    # Build the CREATE TABLE statement dynamically
+    create_table_sql = f"""
+    CREATE TABLE IF NOT EXISTS {table_name} (
+        ID INTEGER PRIMARY KEY AUTOINCREMENT,
+        UserName TEXT UNIQUE NOT NULL
+    );
+    """
+
+    # Execute the SQL statement
+    cursor.execute(create_table_sql)
+
+    # Commit the changes
+    conn.commit()
+
+    # Close the connection
+    conn.close()
+
+    print(f"Table [{table_name}] created in database [{db_path}]")
+
+
+def CreateDBGroupTable(db_path='../src/db_ntfs_info.db', table_name='db_group'):
+    """
+    Create the SQLite database at the given path (if needed) and create the group table in it.
+
+    :param db_path: str, path to the database file
+    :param table_name: str, name of the table to create
+    :return: None
+    """
+
+    # Make sure the target directory exists
+    directory = os.path.dirname(db_path)
+    if directory and not os.path.exists(directory):
+        os.makedirs(directory)
+
+    # Connect to the SQLite database (the file is created automatically if it does not exist)
+    conn = sqlite3.connect(db_path)
+
+    # Create a cursor object
+    cursor = conn.cursor()
+
+    # Build the CREATE TABLE statement dynamically
+    create_table_sql = f"""
+    CREATE TABLE IF NOT EXISTS {table_name} (
+        ID INTEGER PRIMARY KEY AUTOINCREMENT,
+        GroupName TEXT UNIQUE NOT NULL
+    );
+    """
+
+    # Execute the SQL statement
+    cursor.execute(create_table_sql)
+
+    # Commit the changes
+    conn.commit()
+
+    # Close the connection
+    conn.close()
+
+    print(f"Table [{table_name}] created in database [{db_path}]")
+
+
+def CreateDBExtendSnippetTable(db_path='../src/db_ntfs_info.db', table_name='db_extend_extent'):
+    """
+    Create the SQLite database at the given path (if needed) and create the extra-extent table in it.
+
+    :param db_path: str, path to the database file
+    :param table_name: str, name of the table to create
+    :return: None
+    """
+
+    # Make sure the target directory exists
+    directory = os.path.dirname(db_path)
+    if directory and not os.path.exists(directory):
+        os.makedirs(directory)
+
+    # Connect to the SQLite database (the file is created automatically if it does not exist)
+    conn = sqlite3.connect(db_path)
+    cursor = conn.cursor()
+
+    # Build the CREATE TABLE statement dynamically
+    create_table_sql = f"""
+    CREATE TABLE IF NOT EXISTS {table_name} (
+        ID INTEGER PRIMARY KEY AUTOINCREMENT,
+        NodeID INTEGER NOT NULL,
+        ExtentNumber INTEGER NOT NULL,
+        extent_DeviceID INTEGER,
+        extent_Location INTEGER,
+        extent_Length INTEGER,
+
+        -- Foreign key constraints (optional)
+        FOREIGN KEY(NodeID) REFERENCES db_node(ID)
+    );
+    """
+
+    # Execute the SQL statement
+    cursor.execute(create_table_sql)
+
+    # Commit the changes
+    conn.commit()
+    conn.close()
+
+    print(f"Table [{table_name}] created in database [{db_path}]")
+
+
+def CreateDBPathTable(db_path='../src/db_path.db', table_name='db_path'):
+    """
+    Create the SQLite database at the given path (if needed) and create the path info table in it,
+    with an optional DeviceID field (currently commented out) to mark which device (disk) a file belongs to.
+
+    :param db_path: str, path to the database file
+    :param table_name: str, name of the table to create
+    :return: None
+    """
+
+    # Make sure the target directory exists
+    directory = os.path.dirname(db_path)
+    if directory and not os.path.exists(directory):
+        os.makedirs(directory)
+
+    # Connect to the SQLite database (the file is created automatically if it does not exist)
+    conn = sqlite3.connect(db_path)
+    cursor = conn.cursor()
+
+    # Build the CREATE TABLE statement dynamically (DeviceID foreign key kept commented out)
+    create_table_sql = f"""
+    CREATE TABLE IF NOT EXISTS {table_name} (
+        ID INTEGER PRIMARY KEY AUTOINCREMENT,
+--        DeviceID TEXT NOT NULL,
+        Path TEXT NOT NULL,
+        Name TEXT NOT NULL,
+        PathHash TEXT UNIQUE NOT NULL,
+        IsDir INTEGER NOT NULL CHECK(IsDir IN (0, 1)),
+        ParentID INTEGER,
+        ContentSize INTEGER,
+
+        -- Foreign key constraints
+--        FOREIGN KEY(DeviceID) REFERENCES db_device(ID),
+        FOREIGN KEY(ParentID) REFERENCES {table_name}(ID)
+    );
+    """
+
+    # Execute the SQL statement
+    cursor.execute(create_table_sql)
+
+    # Commit the changes
+    conn.commit()
+    conn.close()
+
+    print(f"Table [{table_name}] created in database [{db_path}]")
+
+
+def CreateDBExtendNameTable(db_path='../src/db_extend_name.db', table_name='db_extend_name'):
+    """
+    Create the SQLite database at the given path (if needed) and create the extension-name table in it.
+
+    :param db_path: str, path to the database file
+    :param table_name: str, name of the table to create
+    :return: None
+    """
+
+    # Make sure the target directory exists
+    directory = os.path.dirname(db_path)
+    if directory and not os.path.exists(directory):
+        os.makedirs(directory)
+
+    # Connect to the SQLite database (the file is created automatically if it does not exist)
+    conn = sqlite3.connect(db_path)
+    cursor = conn.cursor()
+
+    # Build the CREATE TABLE statement dynamically
+    create_table_sql = f"""
+    CREATE TABLE IF NOT EXISTS {table_name} (
+        ID INTEGER PRIMARY KEY AUTOINCREMENT,
+        ExtendName TEXT UNIQUE NOT NULL
+    );
+    """
+
+    # Execute the SQL statement
+    cursor.execute(create_table_sql)
+
+    # Commit the changes
+    conn.commit()
+    conn.close()
+
+    print(f"Table [{table_name}] created in database [{db_path}]")
+
+
+def main():
+ CreateDBDeviceTable(db_path='../src/db_ntfs_info.db', table_name='db_device')
+ CreateDBConfigTable(db_path='../src/db_ntfs_info.db', table_name='db_config')
+ CreateDBNodeTable(db_path='../src/db_ntfs_info.db', table_name='db_node')
+ CreateDBUserTable(db_path='../src/db_ntfs_info.db', table_name='db_user')
+ CreateDBGroupTable(db_path='../src/db_ntfs_info.db', table_name='db_group')
+ CreateDBExtendSnippetTable(db_path='../src/db_ntfs_info.db', table_name='db_extend_extent')
+ CreateDBPathTable(db_path='../src/db_ntfs_info.db', table_name='db_path')
+ CreateDBExtendNameTable(db_path='../src/db_ntfs_info.db', table_name='db_extend_name')
+
+
+if __name__ == '__main__':
+ main()
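Review note: the table names are interpolated into the SQL with f-strings. That is safe for the hard-coded names above, but if the names ever come from configuration or user input, validating them first avoids SQL injection through identifiers. A minimal sketch (`ValidateIdentifier` and `CreateSimpleTable` are illustrative helpers, not part of this diff):

```python
import re
import sqlite3


def ValidateIdentifier(name: str) -> str:
    """Allow only simple identifiers before interpolating them into SQL text."""
    if not re.fullmatch(r"[A-Za-z_][A-Za-z0-9_]*", name):
        raise ValueError(f"Unsafe SQL identifier: {name!r}")
    return name


def CreateSimpleTable(db_path: str, table_name: str) -> None:
    table_name = ValidateIdentifier(table_name)
    conn = sqlite3.connect(db_path)
    try:
        conn.execute(
            f"CREATE TABLE IF NOT EXISTS {table_name} ("
            "ID INTEGER PRIMARY KEY AUTOINCREMENT)"
        )
        conn.commit()
    finally:
        conn.close()
```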
diff --git a/db_manage/drop_all_tables.py b/db_manage/drop_all_tables.py
new file mode 100644
index 0000000..e0f59dc
--- /dev/null
+++ b/db_manage/drop_all_tables.py
@@ -0,0 +1,45 @@
+import sqlite3
+
+
+def DropAllTables(db_path):
+    """
+    Drop all eight known tables in the given database (if they exist).
+
+    :param db_path: str, path to the SQLite database file
+    :return: None
+    """
+
+    # Table names (the eight tables created by create_tables.py)
+    tables = [
+        'db_config',         # table 1: config
+        'db_device',         # table 2: devices
+        'db_node',           # table 3: node info
+        'db_group',          # table 4: groups
+        'db_user',           # table 5: users
+        'db_extend_extent',  # table 6: extra extents
+        'db_path',           # table 7: paths
+        'db_extend_name'     # table 8: extensions
+    ]
+
+    # Connect to the SQLite database
+    conn = sqlite3.connect(db_path)
+    cursor = conn.cursor()
+
+    # Drop each table in turn
+    for table in tables:
+        cursor.execute(f"DROP TABLE IF EXISTS {table};")
+        print(f"Table [{table}] dropped")
+
+    # Commit the changes
+    conn.commit()
+    conn.close()
+
+    print("✅ All predefined tables have been dropped")
+
+
+def main():
+ DropAllTables(db_path='../src/db_ntfs_info.db')
+
+
+if __name__ == '__main__':
+ main()
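The hard-coded drop list has to be kept in sync with create_tables.py by hand. A hedged alternative is to enumerate user tables from `sqlite_master` instead; a sketch (`DropEveryUserTable` is hypothetical, not in this diff):

```python
import sqlite3


def DropEveryUserTable(db_path: str) -> None:
    """Drop every user table found in sqlite_master (skips SQLite's internal tables)."""
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()
        cursor.execute(
            "SELECT name FROM sqlite_master "
            "WHERE type = 'table' AND name NOT LIKE 'sqlite_%'"
        )
        for (name,) in cursor.fetchall():
            cursor.execute(f'DROP TABLE IF EXISTS "{name}";')
            print(f"Table [{name}] dropped")
        conn.commit()
    finally:
        conn.close()
```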
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..e69de29
diff --git a/ntfs_utils/__init__.py b/ntfs_utils/__init__.py
new file mode 100644
index 0000000..7b97b73
--- /dev/null
+++ b/ntfs_utils/__init__.py
@@ -0,0 +1,45 @@
+from db_config import GetNTFSBootInfo, InsertInfoToDBConfig
+from db_device import ScanSpecialVolumes, InsertVolumesToDB
+from db_extend_name import InsertExtensionsToDB
+from db_group import InsertGroupToDB
+from db_path import GenerateHash, ShouldSkipPath, ScanVolume, InsertPathDataToDB
+from db_user import InsertUserToDB
+
+
+def main():
+    volume_letter = 'Z'
+
+    # Initialize the db_config table
+    config_data = GetNTFSBootInfo(volume_letter)
+    InsertInfoToDBConfig(config_data)
+
+    # Initialize the db_device table
+    device_data = ScanSpecialVolumes(volume_letter)
+    InsertVolumesToDB([device_data])
+
+    # Initialize the db_user table
+    user_list = ["Copier"]
+    InsertUserToDB(user_list)
+
+    # Initialize the db_group table
+    group_name_list = ["Copier"]
+    InsertGroupToDB(group_name_list)
+
+    # Initialize the db_path table
+    scanned_data = ScanVolume(volume_letter)
+    InsertPathDataToDB(scanned_data)
+
+    # Initialize the db_extend_name table
+    common_extensions = [
+        "txt", "log", "csv", "xls", "xlsx", "doc", "docx",
+        "ppt", "pptx", "pdf", "jpg", "jpeg", "png", "gif",
+        "bmp", "mp3", "wav", "mp4", "avi", "mkv", "mov",
+        "exe", "dll", "bat", "ini", "reg", "zip", "rar", "7z",
+        "json", "xml", "html", "css", "js", "py", "java", "cpp"
+    ]
+    count = InsertExtensionsToDB(common_extensions)
+    print(f"Inserted {count} new extensions.")
+
+
+if __name__ == '__main__':
+ main()
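Note that `from db_config import ...` only resolves when this file is executed directly from inside ntfs_utils/ (the script's directory lands on sys.path); importing ntfs_utils as a package, e.g. from main.py, would fail. A possible layout sketch, assuming the modules are meant to be used as a package (the relative imports and the `__main__.py` file shown here are suggestions, not part of this diff):

```python
# ntfs_utils/__init__.py (sketch): package-relative imports
from .db_config import GetNTFSBootInfo, InsertInfoToDBConfig
from .db_device import ScanSpecialVolumes, InsertVolumesToDB
# ... remaining imports unchanged, each prefixed with a dot

# ntfs_utils/__main__.py (hypothetical new file) so `python -m ntfs_utils` runs main():
# from ntfs_utils import main
# main()
```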
diff --git a/ntfs_utils/db_config.py b/ntfs_utils/db_config.py
new file mode 100644
index 0000000..251607d
--- /dev/null
+++ b/ntfs_utils/db_config.py
@@ -0,0 +1,128 @@
+import ctypes
+import sqlite3
+
+
+def GetNTFSBootInfo(volume_letter):
+    """
+    Read the $Boot metafile (boot sector) of the given NTFS volume and extract:
+    - Bytes per sector
+    - Sectors per cluster
+    - Cluster size (bytes)
+
+    Args:
+        volume_letter: volume letter as a string, e.g. 'C'
+
+    Returns:
+        dict with the fields above
+    """
+
+    # Build the device path in the form \\.\C:
+    device_path = f"\\\\.\\{volume_letter}:"
+
+    # Open the volume device (requires administrator privileges)
+    handle = ctypes.windll.kernel32.CreateFileW(
+        device_path,
+        0x80000000,  # GENERIC_READ
+        0x00000001 | 0x00000002,  # FILE_SHARE_READ | FILE_SHARE_WRITE
+        None,
+        3,  # OPEN_EXISTING
+        0,
+        None
+    )
+
+    if handle == -1:
+        raise PermissionError(f"Cannot open volume {volume_letter}; run as administrator.")
+
+    try:
+        buffer = bytearray(512)
+        buffer_address = (ctypes.c_byte * len(buffer)).from_buffer(buffer)
+        bytes_read = ctypes.c_ulong(0)
+
+        # Read the first sector (BPB / $Boot sector)
+        success = ctypes.windll.kernel32.ReadFile(
+            handle,
+            buffer_address,
+            len(buffer),
+            ctypes.byref(bytes_read),
+            None
+        )
+
+        if not success or bytes_read.value != 512:
+            raise RuntimeError("Failed to read the volume boot sector.")
+
+    finally:
+        ctypes.windll.kernel32.CloseHandle(handle)
+
+    # Parse Bytes Per Sector (offset 0x0B, WORD)
+    bytes_per_sector = int.from_bytes(buffer[0x0B:0x0D], byteorder='little')
+
+    # Parse Sectors Per Cluster (offset 0x0D, BYTE)
+    sectors_per_cluster = buffer[0x0D]
+
+    # Compute the cluster size
+    cluster_size = bytes_per_sector * sectors_per_cluster
+
+    return {
+        "BytesPerSector": bytes_per_sector,
+        "SectorsPerCluster": sectors_per_cluster,
+        "ClusterSize": cluster_size
+    }
+
+
+def InsertInfoToDBConfig(config_data, db_path='../src/db_ntfs_info.db', table_name='db_config'):
+    """
+    Write NTFS configuration values into the config table as key/value pairs.
+
+    Args:
+        config_data: dict of configuration key/value pairs
+        db_path: str, path to the SQLite database
+        table_name: str, target table name (defaults to 'db_config')
+
+    Returns:
+        None
+    """
+    # Connect to the SQLite database (created automatically if it does not exist)
+    conn = sqlite3.connect(db_path)
+    cursor = conn.cursor()
+
+    try:
+        # Create the table if it does not exist
+        create_table_sql = f"""
+        CREATE TABLE IF NOT EXISTS {table_name} (
+            ID INTEGER PRIMARY KEY AUTOINCREMENT,
+            Key TEXT UNIQUE NOT NULL,
+            Value TEXT NOT NULL
+        );
+        """
+        cursor.execute(create_table_sql)
+
+        # Insert or replace entries (INSERT OR REPLACE keeps keys unique)
+        insert_sql = f"""
+        INSERT OR REPLACE INTO {table_name} (Key, Value)
+        VALUES (?, ?)
+        """
+
+        for key, value in config_data.items():
+            cursor.execute(insert_sql, (key, str(value)))
+
+        conn.commit()
+        print("Key/value configuration written to the database")
+
+    except Exception as e:
+        print(f"Database operation failed: {e}")
+        conn.rollback()
+
+    finally:
+        conn.close()
+
+
+def main():
+    volume = "Z"
+    info = GetNTFSBootInfo(volume)
+    print(f"BPB info for volume {volume}:")
+    print(info)
+    InsertInfoToDBConfig(info)
+
+
+if __name__ == "__main__":
+ main()
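The boot-sector values are stored as TEXT key/value pairs, so code that consumes them later has to convert them back to integers. A small read-back sketch against the schema above (`GetConfigValue` is a hypothetical helper, not part of this diff):

```python
import sqlite3


def GetConfigValue(key: str, db_path: str = '../src/db_ntfs_info.db', table_name: str = 'db_config') -> int:
    """Read one value back from the key/value config table and return it as an int."""
    conn = sqlite3.connect(db_path)
    try:
        row = conn.execute(
            f"SELECT Value FROM {table_name} WHERE Key = ?", (key,)
        ).fetchone()
        if row is None:
            raise KeyError(f"{key} not found in {table_name}")
        return int(row[0])
    finally:
        conn.close()


# e.g. cluster_size = GetConfigValue("ClusterSize")
```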
diff --git a/ntfs_utils/db_device.py b/ntfs_utils/db_device.py
new file mode 100644
index 0000000..c632f67
--- /dev/null
+++ b/ntfs_utils/db_device.py
@@ -0,0 +1,125 @@
+import sqlite3
+
+import psutil
+
+
+def ScanSpecialVolumes(volume_letter):
+    """
+    Scan a single disk volume and return a dict with its basic information.
+
+    Args:
+        volume_letter: str, drive letter (e.g. "C", "Z")
+
+    Returns:
+        dict: with Path, Type and Option fields
+    """
+    # Basic drive-letter validation (strip a trailing colon if present)
+    if len(volume_letter) >= 1 and volume_letter[-1] == ":":
+        volume_letter = volume_letter[:-1]
+
+    if not volume_letter or len(volume_letter) != 1 or not volume_letter.isalpha():
+        raise ValueError("Invalid drive letter; expected a single letter such as 'C' or 'Z'")
+
+    return {
+        "Path": volume_letter.upper(),
+        "Type": "磁盘",  # device-type value required by the db_device CHECK constraint ('磁盘' = disk)
+        "Option": None
+    }
+
+
+def ScanNTFSVolumes():
+    """
+    Scan the system for all NTFS-formatted disk volumes.
+
+    Returns:
+        list of dict: one entry per volume with its drive letter
+    """
+    ntfs_volumes = []
+
+    for partition in psutil.disk_partitions():
+        if partition.fstype.upper() == 'NTFS':
+            # Extract the drive letter (without the colon)
+            drive_letter = partition.device[0] if len(partition.device) >= 2 and partition.device[1] == ':' else None
+            if drive_letter:
+                ntfs_volumes.append({
+                    "Path": drive_letter,
+                    "Type": "磁盘",
+                    "Option": None
+                })
+
+    return ntfs_volumes
+
+
+def InsertVolumesToDB(data, db_path='../src/db_ntfs_info.db', table_name='db_device'):
+    """
+    Write NTFS volume records into the device table, skipping duplicates.
+
+    Args:
+        data: list of dict with Path, Type and Option fields
+        db_path: str, path to the SQLite database
+        table_name: str, target table name
+
+    Returns:
+        int: number of records actually inserted
+    """
+    conn = sqlite3.connect(db_path)
+    cursor = conn.cursor()
+
+    try:
+        # Create the table if it does not exist, with a UNIQUE constraint on Path
+        create_table_sql = f"""
+        CREATE TABLE IF NOT EXISTS {table_name} (
+            ID INTEGER PRIMARY KEY AUTOINCREMENT,
+            Path TEXT NOT NULL UNIQUE,
+            Type TEXT NOT NULL CHECK(Type IN ('磁盘', '文件', '文件夹')),
+            Option TEXT
+        );
+        """
+        cursor.execute(create_table_sql)
+
+        inserted_count = 0
+        insert_sql = f"""
+        INSERT OR IGNORE INTO {table_name} (Path, Type, Option)
+        VALUES (?, ?, ?)
+        """
+
+        for item in data:
+            cursor.execute(insert_sql, (
+                item['Path'],
+                item['Type'],
+                item['Option']
+            ))
+            if cursor.rowcount > 0:
+                inserted_count += 1
+
+        conn.commit()
+        print(f"✅ Inserted {inserted_count} NTFS volume record(s)")
+        return inserted_count
+
+    except Exception as e:
+        print(f"❌ Database operation failed: {e}")
+        conn.rollback()
+        return 0
+
+    finally:
+        conn.close()
+
+
+def main():
+    # Scan every NTFS volume on the system
+    # volumes = ScanNTFSVolumes()
+    # print("🔍 Found the following NTFS volumes:")
+    # for vol in volumes:
+    #     print(vol)
+    #
+    # success_count = InsertVolumesToDB(volumes)
+    # print(f"Inserted {success_count} record(s) into the database.")
+
+    # Scan a single volume
+    volume_letter = "Z"
+    device_data = ScanSpecialVolumes(volume_letter)
+    InsertVolumesToDB([device_data])
+
+
+if __name__ == "__main__":
+ main()
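db_device.ID is what the extent*_DeviceID columns in db_node appear intended to reference, so later steps will need to resolve a drive letter back to its row ID. A hedged sketch against the schema above (`GetDeviceID` is hypothetical, not in this diff):

```python
import sqlite3


def GetDeviceID(volume_letter: str, db_path: str = '../src/db_ntfs_info.db', table_name: str = 'db_device'):
    """Return the db_device.ID for a drive letter, or None if it has not been inserted yet."""
    conn = sqlite3.connect(db_path)
    try:
        row = conn.execute(
            f"SELECT ID FROM {table_name} WHERE Path = ?",
            (volume_letter.upper().rstrip(':'),)  # Path stores the bare uppercase letter
        ).fetchone()
        return row[0] if row else None
    finally:
        conn.close()
```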
diff --git a/ntfs_utils/db_extend_name.py b/ntfs_utils/db_extend_name.py
new file mode 100644
index 0000000..eee831d
--- /dev/null
+++ b/ntfs_utils/db_extend_name.py
@@ -0,0 +1,68 @@
+import sqlite3
+
+
+def InsertExtensionsToDB(extensions, db_path='../src/db_ntfs_info.db', table_name='db_extend_name'):
+    """
+    Insert a list of file extensions into the database, silently skipping duplicates.
+
+    Args:
+        extensions: list of str, extensions to insert (e.g. ["txt", "jpg"])
+        db_path: str, path to the SQLite database
+        table_name: str, extension table name
+
+    Returns:
+        int: number of newly inserted records
+    """
+    if not isinstance(extensions, list):
+        raise TypeError("extensions must be a list")
+
+    conn = sqlite3.connect(db_path)
+    cursor = conn.cursor()
+
+    try:
+        # Create the table if it does not exist
+        create_table_sql = f"""
+        CREATE TABLE IF NOT EXISTS {table_name} (
+            ID INTEGER PRIMARY KEY AUTOINCREMENT,
+            ExtendName TEXT UNIQUE NOT NULL
+        );
+        """
+        cursor.execute(create_table_sql)
+
+        # Insert statement (duplicates are ignored)
+        insert_sql = f"""
+        INSERT OR IGNORE INTO {table_name} (ExtendName)
+        VALUES (?)
+        """
+
+        # Normalize to lowercase and drop empty strings
+        data_to_insert = [(ext.lower(),) for ext in extensions if ext.strip()]
+
+        # Bulk insert
+        cursor.executemany(insert_sql, data_to_insert)
+        conn.commit()
+
+        inserted_count = cursor.rowcount
+        if inserted_count > 0:
+            print(f"✅ Inserted {inserted_count} extension(s)")
+        else:
+            print("⚠️ No new extensions inserted (they may already exist)")
+
+        return inserted_count
+
+    except Exception as e:
+        print(f"❌ Insert failed: {e}")
+        conn.rollback()
+        return 0
+
+    finally:
+        conn.close()
+
+
+# Example usage
+if __name__ == "__main__":
+    # A few common file extensions (optional)
+    common_extensions = ["txt", "log", "csv", "xls", "xlsx", "doc", "docx"]
+
+    count = InsertExtensionsToDB(common_extensions)
+    print(f"Inserted {count} new extension(s).")
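db_node.ExtendNameID presumably references this table, so a lookup from a file name to its extension ID will be needed when nodes are populated. A sketch under that assumption (`GetExtensionID` is a hypothetical helper, not part of this diff):

```python
import os
import sqlite3


def GetExtensionID(filename: str, db_path: str = '../src/db_ntfs_info.db', table_name: str = 'db_extend_name'):
    """Return the db_extend_name.ID for a file's extension, or None if the extension is unknown."""
    ext = os.path.splitext(filename)[1].lstrip('.').lower()
    if not ext:
        return None
    conn = sqlite3.connect(db_path)
    try:
        row = conn.execute(
            f"SELECT ID FROM {table_name} WHERE ExtendName = ?", (ext,)
        ).fetchone()
        return row[0] if row else None
    finally:
        conn.close()
```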
diff --git a/ntfs_utils/db_group.py b/ntfs_utils/db_group.py
new file mode 100644
index 0000000..8e73a24
--- /dev/null
+++ b/ntfs_utils/db_group.py
@@ -0,0 +1,65 @@
+import sqlite3
+
+
+def InsertGroupToDB(group_name_list, db_path='../src/db_ntfs_info.db', table_name='db_group'):
+    """
+    Insert one or more group names into the group table.
+
+    Args:
+        group_name_list: list of str, group names to insert
+        db_path: str, path to the SQLite database
+        table_name: str, group table name
+
+    Returns:
+        int: number of records inserted
+    """
+    if not isinstance(group_name_list, list):
+        raise TypeError("group_name_list must be a list")
+
+    conn = sqlite3.connect(db_path)
+    cursor = conn.cursor()
+
+    try:
+        # Create the table if it does not exist
+        create_table_sql = f"""
+        CREATE TABLE IF NOT EXISTS {table_name} (
+            ID INTEGER PRIMARY KEY AUTOINCREMENT,
+            GroupName TEXT UNIQUE NOT NULL
+        );
+        """
+        cursor.execute(create_table_sql)
+
+        # Insert statement (duplicates are ignored)
+        insert_sql = f"""
+        INSERT OR IGNORE INTO {table_name} (GroupName)
+        VALUES (?)
+        """
+
+        data_to_insert = [(name,) for name in group_name_list]
+
+        # Bulk insert
+        cursor.executemany(insert_sql, data_to_insert)
+        conn.commit()
+
+        inserted_count = cursor.rowcount
+        if inserted_count > 0:
+            print(f"✅ Inserted {inserted_count} group name(s)")
+        else:
+            print("⚠️ No new group names inserted (they may already exist)")
+
+        return inserted_count
+
+    except Exception as e:
+        print(f"❌ Insert failed: {e}")
+        conn.rollback()
+        return 0
+
+    finally:
+        conn.close()
+
+
+# Example usage
+if __name__ == "__main__":
+    groups = ["Copier", "Admin", "Guest", "Developer"]
+    count = InsertGroupToDB(groups)
+    print(f"Inserted {count} new group name(s).")
diff --git a/ntfs_utils/db_path.py b/ntfs_utils/db_path.py
new file mode 100644
index 0000000..5d5abf6
--- /dev/null
+++ b/ntfs_utils/db_path.py
@@ -0,0 +1,178 @@
+import hashlib
+import os
+import sqlite3
+
+
+def GenerateHash(s: str) -> str:
+    """
+    Return the SHA-256 hash of the input string.
+    Used as a unique identifier for a path (PathHash).
+    """
+    return hashlib.sha256(s.encode('utf-8')).hexdigest()
+
+
+def ShouldSkipPath(path: str) -> bool:
+    """
+    Return True if the path should be skipped (NTFS metafiles or system folders).
+    """
+    name = os.path.basename(path)
+    if name.startswith('$'):
+        return True
+    if name == "System Volume Information":
+        return True
+    return False
+
+
+def ScanVolume(volume_letter: str):
+    """
+    Walk the whole volume, collecting every file and directory while skipping NTFS
+    metafiles and system folders, and assign a ParentID to each node.
+
+    Returns:
+        list of dict: one entry per file/directory
+    """
+    root_path = f"{volume_letter.upper()}:\\"
+    if not os.path.exists(root_path):
+        raise ValueError(f"Volume {root_path} does not exist")
+
+    result = []
+    path_to_id = {}  # maps a path to its (simulated) database ID
+    counter = 1  # simulates the AUTOINCREMENT ID
+
+    for root, dirs, files in os.walk(root_path, topdown=True, onerror=None, followlinks=False):
+        # Prune directories that should be skipped
+        dirs[:] = [d for d in dirs if not ShouldSkipPath(os.path.join(root, d))]
+
+        for entry in files + dirs:
+            full_path = os.path.join(root, entry)
+
+            if ShouldSkipPath(full_path):
+                continue
+
+            try:
+                if os.path.isdir(full_path):
+                    is_dir = 1
+                    bytes_size = 0
+                elif os.path.isfile(full_path):
+                    is_dir = 0
+                    bytes_size = os.path.getsize(full_path)
+                else:
+                    continue
+
+                name = entry
+
+                # Hash the full Path value
+                path_hash = GenerateHash(full_path)
+
+                # ContentSize in KB; non-empty files smaller than 1 KB count as 1 KB
+                content_size = bytes_size // 1024
+                if content_size == 0 and bytes_size > 0:
+                    content_size = 1
+
+                # Resolve the parent directory
+                parent_path = os.path.dirname(full_path)
+                parent_id = path_to_id.get(parent_path, 0)  # defaults to 0 (the root itself is not recorded)
+
+                item = {
+                    "ID": counter,
+                    "Path": full_path,
+                    "Name": name,
+                    "PathHash": path_hash,
+                    "IsDir": is_dir,
+                    "ParentID": parent_id,
+                    "ContentSize": content_size
+                }
+
+                result.append(item)
+                path_to_id[full_path] = counter
+                counter += 1
+
+            except Exception as e:
+                print(f"⚠️ Skipping path {full_path}, error: {e}")
+
+    return result
+
+
+def InsertPathDataToDB(data, db_path='../src/db_ntfs_info.db', table_name='db_path', batch_size=20):
+    """
+    Write the scan results to the database in batches.
+    """
+    conn = sqlite3.connect(db_path)
+    cursor = conn.cursor()
+
+    try:
+        # Create the table if it does not exist
+        create_table_sql = f"""
+        CREATE TABLE IF NOT EXISTS {table_name} (
+            ID INTEGER PRIMARY KEY AUTOINCREMENT,
+            Path TEXT NOT NULL,
+            Name TEXT NOT NULL,
+            PathHash TEXT UNIQUE NOT NULL,
+            IsDir INTEGER NOT NULL CHECK(IsDir IN (0, 1)),
+            ParentID INTEGER,
+            ContentSize INTEGER,
+
+            FOREIGN KEY(ParentID) REFERENCES {table_name}(ID)
+        );
+        """
+        cursor.execute(create_table_sql)
+
+        # Insert statement (rows with a duplicate PathHash are ignored)
+        insert_sql = f"""
+        INSERT OR IGNORE INTO {table_name}
+        (Path, Name, PathHash, IsDir, ParentID, ContentSize)
+        VALUES (?, ?, ?, ?, ?, ?)
+        """
+
+        total_inserted = 0
+        batch = []
+
+        for item in data:
+            batch.append((
+                item['Path'],
+                item['Name'],
+                item['PathHash'],
+                item['IsDir'],
+                item['ParentID'] or 0,
+                item['ContentSize']
+            ))
+
+            if len(batch) >= batch_size:
+                cursor.executemany(insert_sql, batch)
+                conn.commit()
+                total_inserted += cursor.rowcount
+                print(f"✅ Committed a batch of {len(batch)} rows")
+                batch.clear()
+
+        # Insert the remaining rows
+        if batch:
+            cursor.executemany(insert_sql, batch)
+            conn.commit()
+            total_inserted += cursor.rowcount
+            print(f"✅ Committed the final batch of {len(batch)} rows")
+
+        print(f"✅ Inserted {total_inserted} record(s) into the database in total.")
+
+    except Exception as e:
+        print(f"❌ Insert failed: {e}")
+        conn.rollback()
+
+    finally:
+        conn.close()
+
+
+# Example entry point
+def main():
+    volume_letter = "Z"
+
+    print(f"🔍 Starting full scan of volume {volume_letter}:\\ ...")
+    scanned_data = ScanVolume(volume_letter)
+
+    print(f"📊 Scanned {len(scanned_data)} valid record(s); writing to database...")
+    InsertPathDataToDB(scanned_data)
+
+    print("✅ Full scan and database import complete")
+
+
+if __name__ == "__main__":
+ main()
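One caveat: ScanVolume assigns ParentID from its in-memory counter, which only matches the real AUTOINCREMENT IDs if db_path starts empty and no row is skipped by INSERT OR IGNORE; re-scanning a partially populated table would leave ParentID pointing at the wrong rows. A possible fix is to recompute ParentID from the stored paths after insertion. A sketch, assuming the Windows-style paths written by ScanVolume (`RelinkParentIDs` is a hypothetical helper, not part of this diff):

```python
import hashlib
import os
import sqlite3


def RelinkParentIDs(db_path: str = '../src/db_ntfs_info.db', table_name: str = 'db_path') -> None:
    """Recompute ParentID from each row's parent path so links match the real AUTOINCREMENT IDs."""
    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()
        rows = cursor.execute(f"SELECT ID, Path FROM {table_name}").fetchall()
        # Map SHA-256(path) -> real database ID, mirroring how PathHash is generated
        hash_to_id = {
            hashlib.sha256(path.encode('utf-8')).hexdigest(): row_id
            for row_id, path in rows
        }
        for row_id, path in rows:
            parent_hash = hashlib.sha256(os.path.dirname(path).encode('utf-8')).hexdigest()
            cursor.execute(
                f"UPDATE {table_name} SET ParentID = ? WHERE ID = ?",
                (hash_to_id.get(parent_hash, 0), row_id)  # 0 when the parent is the volume root
            )
        conn.commit()
    finally:
        conn.close()
```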
diff --git a/ntfs_utils/db_user.py b/ntfs_utils/db_user.py
new file mode 100644
index 0000000..54f41ae
--- /dev/null
+++ b/ntfs_utils/db_user.py
@@ -0,0 +1,66 @@
+import sqlite3
+
+
+def InsertUserToDB(username_list, db_path='../src/db_ntfs_info.db', table_name='db_user'):
+    """
+    Insert one or more user names into the user table.
+
+    Args:
+        username_list: list of str, user names to insert
+        db_path: str, path to the SQLite database
+        table_name: str, user table name
+
+    Returns:
+        int: number of new users inserted
+    """
+    if not isinstance(username_list, list):
+        raise TypeError("username_list must be a list")
+
+    conn = sqlite3.connect(db_path)
+    cursor = conn.cursor()
+
+    try:
+        # Create the table if it does not exist
+        create_table_sql = f"""
+        CREATE TABLE IF NOT EXISTS {table_name} (
+            ID INTEGER PRIMARY KEY AUTOINCREMENT,
+            UserName TEXT UNIQUE NOT NULL
+        );
+        """
+        cursor.execute(create_table_sql)
+
+        # Insert statement (duplicates are ignored)
+        insert_sql = f"""
+        INSERT OR IGNORE INTO {table_name} (UserName)
+        VALUES (?)
+        """
+
+        # Build the parameter tuples
+        data_to_insert = [(name,) for name in username_list]
+
+        # Bulk insert
+        cursor.executemany(insert_sql, data_to_insert)
+        conn.commit()
+
+        inserted_count = cursor.rowcount
+        if inserted_count > 0:
+            print(f"✅ Inserted {inserted_count} user(s)")
+        else:
+            print("⚠️ No new users inserted (they may already exist)")
+
+        return inserted_count
+
+    except Exception as e:
+        print(f"❌ Insert failed: {e}")
+        conn.rollback()
+        return 0
+
+    finally:
+        conn.close()
+
+
+# Example usage
+if __name__ == "__main__":
+    users = ["Alice", "Bob", "Charlie", "David"]
+    count = InsertUserToDB(users)
+    print(f"Inserted {count} new user(s).")
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..7ef3911
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,9 @@
+[project]
+name = "fastcopy"
+version = "0.1.0"
+description = "NTFS volume scanning and SQLite metadata cataloging utilities"
+readme = "README.md"
+requires-python = ">=3.12"
+dependencies = [
+ "psutil>=7.0.0",
+]
diff --git a/src/db_ntfs_info.db b/src/db_ntfs_info.db
new file mode 100644
index 0000000..ce7cfdf
Binary files /dev/null and b/src/db_ntfs_info.db differ
diff --git a/uv.lock b/uv.lock
new file mode 100644
index 0000000..f18176d
--- /dev/null
+++ b/uv.lock
@@ -0,0 +1,29 @@
+version = 1
+revision = 2
+requires-python = ">=3.12"
+
+[[package]]
+name = "fastcopy"
+version = "0.1.0"
+source = { virtual = "." }
+dependencies = [
+ { name = "psutil" },
+]
+
+[package.metadata]
+requires-dist = [{ name = "psutil", specifier = ">=7.0.0" }]
+
+[[package]]
+name = "psutil"
+version = "7.0.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/2a/80/336820c1ad9286a4ded7e845b2eccfcb27851ab8ac6abece774a6ff4d3de/psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456", size = 497003, upload-time = "2025-02-13T21:54:07.946Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ed/e6/2d26234410f8b8abdbf891c9da62bee396583f713fb9f3325a4760875d22/psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25", size = 238051, upload-time = "2025-02-13T21:54:12.36Z" },
+ { url = "https://files.pythonhosted.org/packages/04/8b/30f930733afe425e3cbfc0e1468a30a18942350c1a8816acfade80c005c4/psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da", size = 239535, upload-time = "2025-02-13T21:54:16.07Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/ed/d362e84620dd22876b55389248e522338ed1bf134a5edd3b8231d7207f6d/psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91", size = 275004, upload-time = "2025-02-13T21:54:18.662Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/b9/b0eb3f3cbcb734d930fdf839431606844a825b23eaf9a6ab371edac8162c/psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34", size = 277986, upload-time = "2025-02-13T21:54:21.811Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/a2/709e0fe2f093556c17fbafda93ac032257242cabcc7ff3369e2cb76a97aa/psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993", size = 279544, upload-time = "2025-02-13T21:54:24.68Z" },
+ { url = "https://files.pythonhosted.org/packages/50/e6/eecf58810b9d12e6427369784efe814a1eec0f492084ce8eb8f4d89d6d61/psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99", size = 241053, upload-time = "2025-02-13T21:54:34.31Z" },
+ { url = "https://files.pythonhosted.org/packages/50/1b/6921afe68c74868b4c9fa424dad3be35b095e16687989ebbb50ce4fceb7c/psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553", size = 244885, upload-time = "2025-02-13T21:54:37.486Z" },
+]