// rpfm_lib/files/table/local.rs

1//---------------------------------------------------------------------------//
2// Copyright (c) 2017-2026 Ismael Gutiérrez González. All rights reserved.
3//
4// This file is part of the Rusted PackFile Manager (RPFM) project,
5// which can be found here: https://github.com/Frodo45127/rpfm.
6//
7// This file is licensed under the MIT license, which can be found here:
8// https://github.com/Frodo45127/rpfm/blob/master/LICENSE.
9//---------------------------------------------------------------------------//
10
11//! In-memory table implementation with import/export capabilities.
12//!
13//! This module provides [`TableInMemory`], the primary concrete implementation of the
14//! [`Table`] trait. It stores all table data in memory and supports multiple serialization
15//! formats for data exchange.
16//!
17//! # TableInMemory
18//!
19//! [`TableInMemory`] is the standard table implementation used throughout RPFM. It holds:
20//! - Complete table data as `Vec<Vec<DecodedData>>`
21//! - Schema definition and patches
22//! - Metadata (table name, altered flag)
23//!
24//! ## Creation and Loading
25//!
26//! Tables can be created in several ways:
27//!
28//! ```rust
29//! # use rpfm_lib::files::table::local::TableInMemory;
30//! # use rpfm_lib::schema::Definition;
31//! # fn example(definition: &Definition) {
32//! // Create empty table from definition
33//! let table = TableInMemory::new(definition, None, "units_tables");
34//!
35//! // Decode from binary data (most common)
36//! // let table = TableInMemory::decode(&mut data, definition, &patches, Some(entry_count), false, "units_tables")?;
37//! # }
38//! ```
39//!
40//! # Supported Formats
41//!
42//! ## Binary Encoding
43//! - **decode/encode**: Native Total War binary format
44//! - Used for reading/writing binary files in PackFiles
45//! - Compact, optimized for game performance
46//!
47//! ## TSV (Tab-Separated Values)
48//! - **tsv_import/tsv_export**: Human-readable text format
//! - First line: column names
//! - Second line: metadata (`#table_name;version;path`)
51//! - Remaining lines: data rows
52//! - Handles special characters via escape sequences
53//! - Key columns can be exported first for readability
54//!
55//! ## SQLite Database (optional, feature-gated)
56//! - **db_to_sql/sql_to_db**: Store tables in SQLite for complex queries
57//! - Each table version gets its own SQL table (`tablename_v123`)
58//! - Tracks pack name, file name, vanilla status
59//! - Useful for cross-table analysis and searching
60//!
61//! # Schema Migration
62//!
63//! The [`set_definition`](Table::set_definition) method enables **schema version migration**:
64//! - Columns are mapped by name (not position)
65//! - New columns get default values
66//! - Removed columns are dropped
67//! - Type changes trigger automatic conversion
68//! - Data integrity is preserved where possible
69//!
70//! This allows tables from older game versions to be updated to newer schemas.
71//!
72//! # Data Integrity
73//!
74//! The `altered` flag tracks if data was modified during decoding:
75//! - Set to `true` if invalid numeric values were clamped
76//! - Set to `true` if type conversions occurred
77//! - Used to warn users about potential data corruption
78//!
79//! # Implementation Details
80//!
81//! - Uses `getset` for accessor generation
82//! - Implements `Clone`, `Debug`, `PartialEq` for testability
83//! - Serializable via serde for IPC and caching
84//! - Thread-safe through `Table` trait's `Send + Sync` requirement
85
86use base64::{Engine, engine::general_purpose::STANDARD};
87use csv::{StringRecordsIter, Writer};
88use getset::*;
89#[cfg(feature = "integration_sqlite")]use r2d2::Pool;
90#[cfg(feature = "integration_sqlite")]use r2d2_sqlite::SqliteConnectionManager;
91#[cfg(feature = "integration_sqlite")]use rusqlite::params_from_iter;
92use serde_derive::{Serialize, Deserialize};
93
94use std::borrow::Cow;
95use std::collections::HashMap;
96use std::fs::File;
97
98use crate::binary::{ReadBytes, WriteBytes};
99use crate::error::{Result, RLibError};
100use crate::files::table::DecodedData;
101//#[cfg(feature = "integration_log")] use crate::integrations::log::{info, warn};
102use crate::schema::{Definition, DefinitionPatch, FieldType};
103use crate::utils::parse_str_as_bool;
104
105use super::{Table, decode_table, encode_table};
106
107//---------------------------------------------------------------------------//
108//                              Enum & Structs
109//---------------------------------------------------------------------------//
110
111/// In-memory representation of a decoded table with full data and schema.
112///
113/// This is the primary table implementation in RPFM, storing all table rows in memory
114/// along with their schema definition. Tables are typically accessed through the
115/// [`Table`] trait interface rather than directly.
116///
117/// # Fields
118///
119/// - **table_name**: Identifies the table type (e.g., "units_tables", "buildings_tables")
120/// - **definition**: Complete schema definition including column types and constraints
121/// - **definition_patch**: Runtime modifications to the base schema for this specific table
122/// - **table_data**: All table rows as a `Vec<Vec<DecodedData>>` (outer vector is rows, inner is columns)
123/// - **altered**: Flag indicating if data was modified during decoding (e.g., invalid values corrected)
124///
125/// # Accessors
126///
127/// The struct uses the `getset` macro for automatic accessor generation:
128/// - `table_name()` / `set_table_name()`: Public getters/setters via getset
129/// - Schema and data: Accessed through [`Table`] trait methods for type safety
130///
131/// # Thread Safety
132///
133/// This struct implements `Send + Sync` (via the `Table` trait requirement), allowing
134/// safe concurrent read access and message passing between threads.
#[derive(Clone, Debug, PartialEq, Getters, Setters, Serialize, Deserialize)]
#[getset(get = "pub", set = "pub")]
pub struct TableInMemory {

    /// Table type identifier (e.g., "units_tables").
    table_name: String,

    /// Schema definition for this table.
    ///
    /// Skipped by getset: accessed through the [`Table`] trait methods instead.
    #[getset(skip)]
    definition: Definition,

    /// Runtime schema modifications specific to this table instance.
    ///
    /// Skipped by getset: accessed through the [`Table`] trait methods instead.
    #[getset(skip)]
    definition_patch: DefinitionPatch,

    /// All table rows (outer vector is rows, inner is columns).
    ///
    /// Skipped by getset: accessed through the [`Table`] trait methods instead.
    #[getset(skip)]
    table_data: Vec<Vec<DecodedData>>,

    /// Flag indicating data was altered during decoding (e.g., invalid values corrected).
    altered: bool,
}
157
158//----------------------------------------------------------------//
159// Implementations for `Table`.
160//----------------------------------------------------------------//
161
162impl TableInMemory {
163
164    /// Creates a new empty table from a schema definition.
165    ///
166    /// Initializes a table with no rows but with a complete schema definition.
167    /// This is typically used when creating new tables from scratch or before
168    /// importing data from external sources.
169    ///
170    /// # Parameters
171    ///
172    /// - `definition`: Schema defining column structure, types, and constraints
173    /// - `definition_patch`: Optional runtime modifications to the base schema
174    /// - `table_name`: Table type identifier (e.g., "units_tables")
175    ///
176    /// # Examples
177    ///
178    /// ```ignore
179    /// # use rpfm_lib::files::table::{local::TableInMemory, Table};
180    /// # use rpfm_lib::schema::Definition;
181    /// # fn example(definition: &Definition) {
182    /// // Create empty table for manual data entry
183    /// let mut table = TableInMemory::new(definition, None, "units_tables");
184    ///
185    /// // Add rows using the Table trait methods
186    /// let new_row = table.new_row();
187    /// table.data_mut().push(new_row);
188    /// # }
189    /// ```
190    pub fn new(definition: &Definition, definition_patch: Option<&DefinitionPatch>, table_name: &str) -> Self {
191        let table_data = vec![];
192        let definition_patch = if let Some(patch) = definition_patch { patch.clone() } else { HashMap::new() };
193
194        Self {
195            definition: definition.clone(),
196            definition_patch,
197            table_name: table_name.to_owned(),
198            table_data,
199            altered: false,
200        }
201    }
202
203    /// Decodes a table from binary data using the provided schema.
204    ///
205    /// This is the primary method for loading tables from PackFiles. It reads the binary
206    /// format used by Total War games and converts it into an in-memory representation.
207    ///
208    /// # Parameters
209    ///
210    /// - `data`: Binary data reader positioned at the table start
211    /// - `definition`: Schema definition for interpreting the binary data
212    /// - `definition_patch`: Runtime schema modifications
213    /// - `entry_count`: Optional row count (if `None`, reads from data stream)
214    /// - `return_incomplete`: If `true`, returns partial data on decode errors instead of failing
215    /// - `table_name`: Table type identifier
216    ///
217    /// # Behavior
218    ///
219    /// - Reads entry count from stream if not provided
220    /// - Decodes each row according to schema field definitions
221    /// - Sets `altered` flag if invalid data is corrected during decoding
222    /// - Can return incomplete tables for error recovery if requested
223    ///
224    /// # Errors
225    ///
226    /// Returns an error if:
227    /// - Binary data is corrupted or truncated
228    /// - Data types don't match schema expectations
229    /// - Field decoding fails (unless `return_incomplete` is true)
230    ///
231    /// # Examples
232    ///
233    /// ```ignore
234    /// # use rpfm_lib::files::table::local::TableInMemory;
235    /// # use rpfm_lib::schema::Definition;
236    /// # use rpfm_lib::binary::ReadBytes;
237    /// # use std::collections::HashMap;
238    /// # fn example<R: ReadBytes>(data: &mut R, definition: &Definition) -> anyhow::Result<()> {
239    /// // Decode table from binary PackFile data
240    /// let table = TableInMemory::decode(
241    ///     data,
242    ///     definition,
243    ///     &HashMap::new(),  // No patches
244    ///     Some(100),        // 100 entries
245    ///     false,            // Fail on errors
246    ///     "units_tables"
247    /// )?;
248    /// # Ok(())
249    /// # }
250    /// ```
251    pub fn decode<R: ReadBytes>(
252        data: &mut R,
253        definition: &Definition,
254        definition_patch: &DefinitionPatch,
255        entry_count: Option<u32>,
256        return_incomplete: bool,
257        table_name: &str,
258    ) -> Result<Self> {
259
260        let mut altered = false;
261        let table_data = decode_table(data, definition, entry_count, return_incomplete, &mut altered)?;
262        let table = Self {
263            definition: definition.clone(),
264            definition_patch: definition_patch.clone(),
265            table_name: table_name.to_owned(),
266            table_data,
267            altered
268        };
269
270        Ok(table)
271    }
272
273    /// Encodes the table to binary format for writing to PackFiles.
274    ///
275    /// Converts the in-memory table representation back to the binary format used
276    /// by Total War games. This is the inverse of [`decode`](Self::decode).
277    ///
278    /// # Parameters
279    ///
280    /// - `data`: Binary writer to receive the encoded table
281    ///
282    /// # Format
283    ///
284    /// The binary output includes:
285    /// - Entry count (u32)
286    /// - Row data encoded according to field types
287    /// - Applied schema patches are used during encoding
288    ///
289    /// # Errors
290    ///
291    /// Returns an error if:
292    /// - Writing to the output stream fails
293    /// - Data contains values that cannot be encoded in the target type
294    ///
295    /// # Examples
296    ///
297    /// ```ignore
298    /// # use rpfm_lib::files::table::local::TableInMemory;
299    /// # use rpfm_lib::binary::WriteBytes;
300    /// # fn example<W: WriteBytes>(table: &TableInMemory, output: &mut W) -> anyhow::Result<()> {
301    /// // Encode table for saving to PackFile
302    /// table.encode(output)?;
303    /// # Ok(())
304    /// # }
305    /// ```
306    pub fn encode<W: WriteBytes>(&self, data: &mut W) -> Result<()> {
307        encode_table(&self.data(), data, self.definition(), &Some(self.patches()))
308    }
309
310    //----------------------------------------------------------------//
311    // TSV Functions for tables.
312    //----------------------------------------------------------------//
313    // TODO: Make tsv trait.
314
315    /// Imports a table from TSV (Tab-Separated Values) format.
316    ///
317    /// Parses TSV data and creates a new table with the imported rows. The TSV format
318    /// is human-readable and editable, making it useful for bulk edits and external
319    /// tools integration.
320    ///
321    /// # TSV Format
322    ///
323    /// Expected format:
324    /// 1. **Header row**: Column names (must match schema field names)
325    /// 2. **Metadata row**: `#table_name;version;path` (optional, can be skipped)
326    /// 3. **Data rows**: Tab-separated values, one row per line
327    ///
328    /// # Parameters
329    ///
330    /// - `records`: CSV record iterator from the TSV file
331    /// - `definition`: Schema definition for validation
332    /// - `field_order`: Maps column indices to field names from the header
333    /// - `table_name`: Table type identifier
334    /// - `schema_patches`: Optional schema modifications
335    ///
336    /// # Column Mapping
337    ///
338    /// Columns are matched by name (not position), allowing:
339    /// - Reordered columns in TSV files
340    /// - Missing columns (filled with defaults)
341    /// - Extra columns in TSV (ignored)
342    ///
343    /// # Type Parsing
344    ///
345    /// - **Booleans**: "true"/"false", "1"/"0", "yes"/"no" (case-insensitive)
346    /// - **Numbers**: Standard decimal format, scientific notation for floats
347    /// - **Colors**: Hexadecimal strings (e.g., "FF0000")
348    /// - **Sequences**: Base64-encoded binary data
349    ///
350    /// # Errors
351    ///
352    /// Returns an error if:
353    /// - TSV format is invalid (wrong delimiter, malformed rows)
354    /// - Data cannot be parsed to the expected type
355    /// - Required columns are missing
356    ///
357    /// Error includes row and column numbers for debugging: `ImportTSVIncorrectRow(row, col)`
    pub(crate) fn tsv_import(records: StringRecordsIter<File>, definition: &Definition, field_order: &HashMap<u32, String>, table_name: &str, schema_patches: Option<&DefinitionPatch>) -> Result<Self> {
        let mut table = Self::new(definition, schema_patches, table_name);
        let mut entries = vec![];

        let fields_processed = definition.fields_processed();

        for (row, record) in records.enumerate() {
            match record {
                Ok(record) => {
                    // Start from a default row, so columns missing from the TSV keep their default values.
                    let mut entry = table.new_row();
                    for (column, field) in record.iter().enumerate() {

                        // Get the column name from the header, and try to map it to a column in the table's.
                        // Matching is done by name, not position, so reordered TSV columns still land correctly
                        // and TSV columns unknown to the schema are silently ignored.
                        if let Some(column_name) = field_order.get(&(column as u32)) {
                            if let Some(column_number) = fields_processed.iter().position(|x| x.name() == column_name) {

                                // Parse the raw cell text into the typed value the schema expects.
                                // Any parse failure reports the offending (row, column) pair for debugging.
                                entry[column_number] = match fields_processed[column_number].field_type() {
                                    FieldType::Boolean => parse_str_as_bool(field).map(DecodedData::Boolean).map_err(|_| RLibError::ImportTSVIncorrectRow(row, column))?,
                                    FieldType::F32 => DecodedData::F32(field.parse::<f32>().map_err(|_| RLibError::ImportTSVIncorrectRow(row, column))?),
                                    FieldType::F64 => DecodedData::F64(field.parse::<f64>().map_err(|_| RLibError::ImportTSVIncorrectRow(row, column))?),
                                    FieldType::I16 => DecodedData::I16(field.parse::<i16>().map_err(|_| RLibError::ImportTSVIncorrectRow(row, column))?),
                                    FieldType::I32 => DecodedData::I32(field.parse::<i32>().map_err(|_| RLibError::ImportTSVIncorrectRow(row, column))?),
                                    FieldType::I64 => DecodedData::I64(field.parse::<i64>().map_err(|_| RLibError::ImportTSVIncorrectRow(row, column))?),
                                    FieldType::OptionalI16 => DecodedData::OptionalI16(field.parse::<i16>().map_err(|_| RLibError::ImportTSVIncorrectRow(row, column))?),
                                    FieldType::OptionalI32 => DecodedData::OptionalI32(field.parse::<i32>().map_err(|_| RLibError::ImportTSVIncorrectRow(row, column))?),
                                    FieldType::OptionalI64 => DecodedData::OptionalI64(field.parse::<i64>().map_err(|_| RLibError::ImportTSVIncorrectRow(row, column))?),
                                    // Colours are validated as hex strings but stored as the original text.
                                    FieldType::ColourRGB => DecodedData::ColourRGB(if u32::from_str_radix(field, 16).is_ok() {
                                        field.to_owned()
                                    } else {
                                        Err(RLibError::ImportTSVIncorrectRow(row, column))?
                                    }),
                                    FieldType::StringU8 => DecodedData::StringU8(field.to_owned()),
                                    FieldType::StringU16 => DecodedData::StringU16(field.to_owned()),
                                    FieldType::OptionalStringU8 => DecodedData::OptionalStringU8(field.to_owned()),
                                    FieldType::OptionalStringU16 => DecodedData::OptionalStringU16(field.to_owned()),

                                    // Sequences are transported through TSV as base64-encoded binary blobs.
                                    FieldType::SequenceU16(_) => DecodedData::SequenceU16(STANDARD.decode(field).map_err(|_| RLibError::ImportTSVIncorrectRow(row, column))?),
                                    FieldType::SequenceU32(_) => DecodedData::SequenceU32(STANDARD.decode(field).map_err(|_| RLibError::ImportTSVIncorrectRow(row, column))?),
                                }
                            }
                        }
                    }
                    entries.push(entry);
                }
                // A record the CSV reader itself couldn't parse: report the row with column 0.
                Err(_) => return Err(RLibError::ImportTSVIncorrectRow(row, 0)),
            }
        }

        // If we reached this point without errors, we replace the old data with the new one and return success.
        table.set_data(&entries)?;
        Ok(table)
    }
411
412    /// Exports the table to TSV (Tab-Separated Values) format.
413    ///
414    /// Writes table data to a TSV file for human editing, version control, or
415    /// external tool processing. The output format is compatible with
416    /// [`tsv_import`](Self::tsv_import).
417    ///
418    /// # Parameters
419    ///
420    /// - `writer`: CSV writer configured for TSV output
421    /// - `table_path`: Original file path (stored in metadata row)
422    /// - `keys_first`: If `true`, sorts key columns to the left for easier reading
423    ///
424    /// # Output Format
425    ///
426    /// ```text
427    /// column1    column2    column3
428    /// #units_tables;5;db/units_tables/data.bin
429    /// value1     value2     value3
430    /// value4     value5     value6
431    /// ```
432    ///
433    /// - **Line 1**: Column names from schema
434    /// - **Line 2**: Metadata (`#table_name;version;path`) with padding cells
435    /// - **Lines 3+**: Data rows
436    ///
437    /// # Data Formatting
438    ///
439    /// - **Floats**: Fixed 4 decimal places (e.g., "3.1416")
440    /// - **Booleans**: "true" or "false"
441    /// - **Sequences**: Base64-encoded binary data
442    /// - **Special chars**: Newlines/tabs are escaped as `\\n`/`\\t`
443    ///
444    /// # Column Ordering
445    ///
446    /// If `keys_first` is `true`:
447    /// - Key columns appear first (left-most)
448    /// - Non-key columns follow
449    /// - Makes primary keys visible without horizontal scrolling
450    ///
451    /// # Errors
452    ///
453    /// Returns an error if writing to the file fails.
454    pub(crate) fn tsv_export(&self, writer: &mut Writer<File>, table_path: &str, keys_first: bool) -> Result<()> {
455
456        let fields_processed = self.definition().fields_processed();
457        let fields_sorted = self.definition().fields_processed_sorted(keys_first);
458        let fields_sorted_properly = fields_sorted.iter()
459            .map(|field_sorted| (fields_processed.iter().position(|field| field == field_sorted).unwrap(), field_sorted))
460            .collect::<Vec<(_,_)>>();
461
462        // We serialize the info of the table (name and version) in the first line, and the column names in the second one.
463        let metadata = (format!("#{};{};{}", self.table_name(), self.definition().version(), table_path), vec![String::new(); fields_sorted_properly.len() - 1]);
464        writer.serialize(fields_sorted_properly.iter().map(|(_, field)| field.name()).collect::<Vec<&str>>())?;
465        writer.serialize(metadata)?;
466
467        // Then we serialize each entry in the DB Table.
468        let entries = self.data();
469        for entry in &*entries {
470            let sorted_entry = fields_sorted_properly.iter()
471                .map(|(index, _)| entry[*index].data_to_string())
472                .collect::<Vec<Cow<str>>>();
473            writer.serialize(sorted_entry)?;
474        }
475
476        writer.flush().map_err(From::from)
477    }
478
479    //----------------------------------------------------------------//
480    // SQL functions for tables.
481    //----------------------------------------------------------------//
482
483    /// Inserts the table into a SQLite database for querying and analysis.
484    ///
485    /// Creates or updates a SQL table with this table's data, enabling complex queries,
486    /// cross-table joins, and full-text search. Each table version gets its own SQL table
487    /// named `tablename_v{version}`.
488    ///
489    /// # Parameters
490    ///
491    /// - `pool`: SQLite connection pool
492    /// - `pack_name`: Name of the PackFile containing this table
493    /// - `file_name`: Path to this table within the PackFile
494    /// - `is_vanilla_pack`: `true` if from official game data, `false` for mods
495    ///
496    /// # SQL Schema
497    ///
498    /// The created SQL table includes:
499    /// - `pack_name` (TEXT): Source PackFile identifier
500    /// - `file_name` (TEXT): Table path within PackFile
501    /// - `is_vanilla` (INTEGER): 1 for vanilla, 0 for mods
502    /// - Column for each schema field (types mapped from `FieldType`)
503    ///
504    /// # Behavior
505    ///
506    /// - Creates the SQL table if it doesn't exist (silently ignores if exists)
507    /// - Uses `INSERT OR REPLACE` to update existing rows
508    /// - Sequences are stored as BLOB for efficient binary storage
509    ///
510    /// # Use Cases
511    ///
512    /// - Finding all units with a specific ability across multiple mods
513    /// - Analyzing stat distributions (e.g., average unit cost)
514    /// - Detecting conflicts between mods
515    /// - Building searchable databases of game content
516    ///
517    /// # Feature Gate
518    ///
519    /// Only available with the `integration_sqlite` feature enabled.
520    #[cfg(feature = "integration_sqlite")]
521    pub fn db_to_sql(&self, pool: &Pool<SqliteConnectionManager>, pack_name: &str, file_name: &str, is_vanilla_pack: bool) -> Result<()> {
522
523        // Try to create the table, in case it doesn't exist yet. Ignore a failure here, as it'll mean the table already exists.
524        let params: Vec<String> = vec![];
525        let create_table = self.definition().map_to_sql_create_table_string(self.table_name());
526        match pool.get()?.execute(&create_table, params_from_iter(params)) {
527            Ok(_) => {
528                //#[cfg(feature = "integration_log")] {
529                //    info!("Table {} created succesfully.", self.table_name());
530                //}
531            },
532
533            Err(error) => {
534                //#[cfg(feature = "integration_log")] {
535                //    warn!("Table {} failed to be created: {error}", self.table_name());
536                //}
537            },
538        }
539
540        self.insert_all_to_sql(pool, pack_name, file_name, is_vanilla_pack)?;
541        Ok(())
542    }
543
544    /// Loads table data from a SQLite database.
545    ///
546    /// Retrieves previously stored table data from SQL and replaces the current
547    /// table's rows. This is the inverse of [`db_to_sql`](Self::db_to_sql).
548    ///
549    /// # Parameters
550    ///
551    /// - `pool`: SQLite connection pool
552    /// - `pack_name`: Name of the source PackFile
553    /// - `file_name`: Path to the table within the PackFile
554    ///
555    /// # Behavior
556    ///
557    /// - Queries the SQL table for rows matching `pack_name` and `file_name`
558    /// - Maintains row order via `ROWID`
559    /// - Converts SQL types back to `DecodedData` variants
560    /// - Replaces all current table data
561    ///
562    /// # Feature Gate
563    ///
564    /// Only available with the `integration_sqlite` feature enabled.
565    #[cfg(feature = "integration_sqlite")]
566    pub fn sql_to_db(&mut self, pool: &Pool<SqliteConnectionManager>, pack_name: &str, file_name: &str) -> Result<()> {
567        self.table_data = self.select_all_from_sql(pool, pack_name, file_name)?;
568        Ok(())
569    }
570
571    /// Inserts all table rows into a SQLite database.
572    ///
573    /// Converts each row's `DecodedData` fields to SQL-compatible values and performs
574    /// a bulk `INSERT OR REPLACE` operation. Sequence fields are passed as binary parameters.
575    ///
576    /// # Arguments
577    ///
578    /// * `pool` - SQLite connection pool.
579    /// * `pack_name` - Name of the pack containing this table.
580    /// * `file_name` - Path of the table file within the pack.
581    /// * `is_vanilla_pack` - Whether the pack is from the base game (stored as 1/0 flag).
582    #[cfg(feature = "integration_sqlite")]
583    fn insert_all_to_sql(&self, pool: &Pool<SqliteConnectionManager>, pack_name: &str, file_name: &str, is_vanilla_pack: bool) -> Result<()> {
584        let mut params = vec![];
585        let values = self.table_data.iter().map(|row| {
586            format!("(\"{}\", \"{}\", {}, {})",
587                pack_name,
588                file_name,
589                if is_vanilla_pack { "1" } else { "0" },
590                row.iter().map(|field| {
591                match field {
592                    DecodedData::Boolean(data) => if *data { "1".to_owned() } else { "0".to_owned() },
593                    DecodedData::F32(data) => format!("{data:.4}"),
594                    DecodedData::F64(data) => format!("{data:.4}"),
595                    DecodedData::I16(data) => format!("\"{data}\""),
596                    DecodedData::I32(data) => format!("\"{data}\""),
597                    DecodedData::I64(data) => format!("\"{data}\""),
598                    DecodedData::ColourRGB(data) => format!("\"{}\"", data.replace('\"', "\"\"")),
599                    DecodedData::StringU8(data) => format!("\"{}\"", data.replace('\"', "\"\"")),
600                    DecodedData::StringU16(data) => format!("\"{}\"", data.replace('\"', "\"\"")),
601                    DecodedData::OptionalI16(data) => format!("\"{data}\""),
602                    DecodedData::OptionalI32(data) => format!("\"{data}\""),
603                    DecodedData::OptionalI64(data) => format!("\"{data}\""),
604                    DecodedData::OptionalStringU8(data) => format!("\"{}\"", data.replace('\"', "\"\"")),
605                    DecodedData::OptionalStringU16(data) => format!("\"{}\"", data.replace('\"', "\"\"")),
606                    DecodedData::SequenceU16(data) => {
607                        params.push(data.to_vec());
608                        "?".to_owned()
609                    },
610                    DecodedData::SequenceU32(data) => {
611                        params.push(data.to_vec());
612                        "?".to_owned()
613                    },
614                }
615            }).collect::<Vec<_>>().join(","))
616        }).collect::<Vec<_>>().join(",");
617
618        // If there are no values, don't bother with the query.
619        if values.is_empty() {
620            return Ok(());
621        }
622
623        let query = format!("INSERT OR REPLACE INTO \"{}_v{}\" {} VALUES {}",
624            self.table_name().replace('\"', "'"),
625            self.definition().version(),
626            self.definition().map_to_sql_insert_into_string(),
627            values
628        );
629
630        pool.get()?.execute(&query, params_from_iter(params.iter()))
631            .map(|_| ())
632            .map_err(From::from)
633    }
634
635    /// Retrieves all table rows from a SQLite database.
636    ///
637    /// Queries the database for rows matching the pack and file name, converting
638    /// SQL values back to `DecodedData` fields based on the table definition.
639    ///
640    /// # Arguments
641    ///
642    /// * `pool` - SQLite connection pool.
643    /// * `pack_name` - Name of the pack containing this table.
644    /// * `file_name` - Path of the table file within the pack.
645    ///
646    /// # Returns
647    ///
648    /// A vector of rows, each containing decoded field data in column order.
649    #[cfg(feature = "integration_sqlite")]
650    fn select_all_from_sql(&self, pool: &Pool<SqliteConnectionManager>, pack_name: &str, file_name: &str) -> Result<Vec<Vec<DecodedData>>> {
651        let definition = self.definition();
652        let fields_processed = definition.fields_processed();
653
654        let field_names = fields_processed.iter().map(|field| field.name()).collect::<Vec<&str>>().join(",");
655        let query = format!("SELECT {} FROM \"{}_v{}\" WHERE pack_name = \"{}\" AND file_name = \"{}\" order by ROWID",
656            field_names,
657            self.table_name().replace('\"', "'"),
658            definition.version(),
659            pack_name,
660            file_name
661        );
662
663        let conn = pool.get()?;
664        let mut stmt = conn.prepare(&query)?;
665        let rows = stmt.query_map([], |row| {
666            let mut data = Vec::with_capacity(fields_processed.len());
667            for (i, field) in fields_processed.iter().enumerate() {
668                data.push(match field.field_type() {
669                    FieldType::Boolean => DecodedData::Boolean(row.get(i)?),
670                    FieldType::F32 => DecodedData::F32(row.get(i)?),
671                    FieldType::F64 => DecodedData::F64(row.get(i)?),
672                    FieldType::I16 => DecodedData::I16(row.get(i)?),
673                    FieldType::I32 => DecodedData::I32(row.get(i)?),
674                    FieldType::I64 => DecodedData::I64(row.get(i)?),
675                    FieldType::ColourRGB => DecodedData::ColourRGB(row.get(i)?),
676                    FieldType::StringU8 => DecodedData::StringU8(row.get(i)?),
677                    FieldType::StringU16 => DecodedData::StringU16(row.get(i)?),
678                    FieldType::OptionalI16 => DecodedData::OptionalI16(row.get(i)?),
679                    FieldType::OptionalI32 => DecodedData::OptionalI32(row.get(i)?),
680                    FieldType::OptionalI64 => DecodedData::OptionalI64(row.get(i)?),
681                    FieldType::OptionalStringU8 => DecodedData::OptionalStringU8(row.get(i)?),
682                    FieldType::OptionalStringU16 => DecodedData::OptionalStringU16(row.get(i)?),
683                    FieldType::SequenceU16(_) => DecodedData::SequenceU16(row.get(i)?),
684                    FieldType::SequenceU32(_) => DecodedData::SequenceU32(row.get(i)?),
685                });
686            }
687
688            Ok(data)
689        })?;
690
691        let mut data = vec![];
692        for row in rows {
693            data.push(row?);
694        }
695
696        Ok(data)
697    }
698
699    /// Counts the number of rows in a table matching a unique identifier.
700    ///
701    /// # Arguments
702    ///
703    /// * `pool` - SQLite connection pool.
704    /// * `table_name` - Name of the table type (e.g., "units_tables").
705    /// * `table_version` - Schema version of the table.
706    /// * `table_unique_id` - Unique identifier to filter rows.
707    ///
708    /// # Returns
709    ///
710    /// The count of matching rows in the database.
711    #[cfg(feature = "integration_sqlite")]
712    pub fn count_table(
713        pool: &Pool<SqliteConnectionManager>,
714        table_name: &str,
715        table_version: i32,
716        table_unique_id: u64,
717    ) -> Result<u64> {
718        let query = format!("SELECT COUNT(*) FROM \"{}_v{}\" WHERE table_unique_id = {}",
719            table_name.replace('\"', "'"),
720            table_version,
721            table_unique_id
722        );
723
724        let conn = pool.get()?;
725        let mut stmt = conn.prepare(&query)?;
726        let mut rows = stmt.query([])?;
727        let mut count = 0;
728        if let Some(row) = rows.next()? {
729            count = row.get(0)?;
730        }
731
732        Ok(count)
733    }
734}
735
impl Table for TableInMemory {

    /// Returns the name of the table file this data belongs to.
    fn name(&self) -> &str {
        &self.table_name
    }

    /// Returns the schema definition this table's data conforms to.
    fn definition(&self) -> &Definition {
        &self.definition
    }

    /// Returns the definition patches applied on top of the base definition.
    fn patches(&self) -> &DefinitionPatch {
        &self.definition_patch
    }

    /// Returns a borrowed, read-only view over the table's rows.
    fn data(&'_ self) -> Cow<'_, [Vec<DecodedData>]> {
        Cow::from(&self.table_data)
    }

    /// Returns a mutable reference to the table's rows.
    ///
    /// NOTE(review): unlike [`Self::set_data`], this performs no validation, so
    /// callers must keep the rows consistent with the definition themselves.
    fn data_mut(&mut self) -> &mut Vec<Vec<DecodedData>> {
        &mut self.table_data
    }

    /// Sets the name of the table file this data belongs to.
    fn set_name(&mut self, val: String) {
        self.table_name = val;
    }

    /// Replaces this table's definition, migrating the existing data to it.
    ///
    /// Columns are matched between the old and new definitions by name:
    /// - Columns present in both are kept (converting the cell type if needed).
    /// - Columns only in the new definition are filled with their default value.
    /// - Columns only in the old definition are dropped.
    fn set_definition(&mut self, new_definition: &Definition) {

        // It's simple: we compare both schemas, and get the original and final positions of each column.
        // If a column is new, its original position is -1. If it has been removed, its final position is -1.
        let mut positions: Vec<(i32, i32)> = vec![];
        let new_fields_processed = new_definition.fields_processed();
        let old_fields_processed = self.definition.fields_processed();

        // First pass: map each new column back to its old position (or -1 if it's brand new).
        for (new_pos, new_field) in new_fields_processed.iter().enumerate() {
            if let Some(old_pos) = old_fields_processed.iter().position(|x| x.name() == new_field.name()) {
                positions.push((old_pos as i32, new_pos as i32))
            } else { positions.push((-1, new_pos as i32)); }
        }

        // Then, for each field in the old definition, check if it exists in the new one.
        for (old_pos, old_field) in old_fields_processed.iter().enumerate() {
            if !new_fields_processed.iter().any(|x| x.name() == old_field.name()) { positions.push((old_pos as i32, -1)); }
        }

        // We sort the columns by their destination.
        positions.sort_by_key(|x| x.1);

        // Then, we create the new data using the old one and the column changes.
        let mut new_entries: Vec<Vec<DecodedData>> = Vec::with_capacity(self.table_data.len());
        for row in self.table_data.iter() {
            let mut entry = vec![];
            for (old_pos, new_pos) in &positions {

                // If the new position is -1, it means the column got removed. We skip it.
                if *new_pos == -1 { continue; }

                // If the old position is -1, it means we got a new column. We need to get its type and create a `Default` field with it.
                else if *old_pos == -1 {
                    let field_type = new_fields_processed[*new_pos as usize].field_type();
                    let default_value = new_fields_processed[*new_pos as usize].default_value(Some(&self.definition_patch));
                    entry.push(DecodedData::new_from_type_and_value(field_type, &default_value));
                }

                // Otherwise, we got a moved column. Check here if it needs type conversion.
                // If the conversion fails, fall back to the new column's default value.
                else if new_fields_processed[*new_pos as usize].field_type() != old_fields_processed[*old_pos as usize].field_type() {
                    let converted_data = match row[*old_pos as usize].convert_between_types(new_fields_processed[*new_pos as usize].field_type()) {
                        Ok(data) => data,
                        Err(_) => {
                            let field_type = new_fields_processed[*new_pos as usize].field_type();
                            let default_value = new_fields_processed[*new_pos as usize].default_value(Some(&self.definition_patch));
                            DecodedData::new_from_type_and_value(field_type, &default_value)
                        }
                    };
                    entry.push(converted_data);
                }

                // If we reach this, we just got a moved column without any extra change.
                else {
                    entry.push(row[*old_pos as usize].clone());
                }
            }
            new_entries.push(entry);
        }

        self.table_data = new_entries;

        // Then, we finally replace our definition and our data.
        self.definition = new_definition.clone();
    }

    /// Replaces the table's data after validating it against the definition.
    ///
    /// # Errors
    ///
    /// - `TableRowWrongFieldCount` if any row's length doesn't match the number
    ///   of processed fields.
    /// - `EncodingTableWrongFieldType` if any cell's type doesn't match the
    ///   field type at its position.
    fn set_data(&mut self, data: &[Vec<DecodedData>]) -> Result<()> {
        let fields_processed = self.definition.fields_processed();
        for row in data {

            // First, we need to make sure all rows we have are exactly what we expect.
            if row.len() != fields_processed.len() {
                return Err(RLibError::TableRowWrongFieldCount(fields_processed.len(), row.len()))
            }

            for (index, cell) in row.iter().enumerate() {

                // Next, we need to ensure each field is of the type we expected.
                let field = fields_processed.get(index).unwrap();
                if !cell.is_field_type_correct(field.field_type()) {
                    return Err(RLibError::EncodingTableWrongFieldType(FieldType::from(cell).to_string(), field.field_type().to_string()))
                }
            }
        }

        // If we passed all the checks, replace the data.
        self.table_data = data.to_vec();
        Ok(())
    }

    /// Returns the position of the column with the given name, if it exists.
    fn column_position_by_name(&self, column_name: &str) -> Option<usize> {
        self.definition().column_position_by_name(column_name)
    }

    /// Returns `true` if the table has no rows.
    fn is_empty(&self) -> bool {
        self.data().is_empty()
    }

    /// Returns the number of rows in the table.
    fn len(&self) -> usize {
        self.data().len()
    }

    /// Finds all rows whose cell in `column_name` matches `data`, compared as strings.
    ///
    /// Returns `Some((column_index, row_indexes))` when at least one row matches,
    /// or `None` when the column doesn't exist or no row matches.
    fn rows_containing_data(&self, column_name: &str, data: &str) -> Option<(usize, Vec<usize>)> {
        let mut row_indexes = vec![];

        let column_index = self.column_position_by_name(column_name)?;
        for (row_index, row) in self.data().iter().enumerate() {
            if let Some(cell_data) = row.get(column_index) {
                if cell_data.data_to_string() == data {
                    row_indexes.push(row_index);
                }
            }
        }

        if row_indexes.is_empty() {
            None
        } else {
            Some((column_index, row_indexes))
        }
    }
}