diff --git a/src/main/java/io/lettuce/core/AbstractRedisAsyncCommands.java b/src/main/java/io/lettuce/core/AbstractRedisAsyncCommands.java index 1b48a32e3f..d92ef79de2 100644 --- a/src/main/java/io/lettuce/core/AbstractRedisAsyncCommands.java +++ b/src/main/java/io/lettuce/core/AbstractRedisAsyncCommands.java @@ -52,7 +52,7 @@ import io.lettuce.core.search.AggregationReply; import io.lettuce.core.search.AggregationReply.Cursor; import io.lettuce.core.search.HybridReply; - +import io.lettuce.core.search.IndexInfo; import io.lettuce.core.search.SearchReply; import io.lettuce.core.search.SpellCheckResult; import io.lettuce.core.search.Suggestion; @@ -1638,6 +1638,11 @@ public RedisFuture> ftList() { return dispatch(searchCommandBuilder.ftList()); } + @Override + public RedisFuture> ftInfo(String index) { + return dispatch(searchCommandBuilder.ftInfo(index)); + } + @Override public RedisFuture>> ftSyndump(String index) { return dispatch(searchCommandBuilder.ftSyndump(index)); diff --git a/src/main/java/io/lettuce/core/AbstractRedisReactiveCommands.java b/src/main/java/io/lettuce/core/AbstractRedisReactiveCommands.java index 3a66d5c1a1..d1329bfb71 100644 --- a/src/main/java/io/lettuce/core/AbstractRedisReactiveCommands.java +++ b/src/main/java/io/lettuce/core/AbstractRedisReactiveCommands.java @@ -54,6 +54,7 @@ import io.lettuce.core.search.AggregationReply.Cursor; import io.lettuce.core.search.HybridReply; +import io.lettuce.core.search.IndexInfo; import io.lettuce.core.search.SearchReply; import io.lettuce.core.search.SpellCheckResult; import io.lettuce.core.search.Suggestion; @@ -1699,6 +1700,11 @@ public Flux ftList() { return createDissolvingFlux(() -> searchCommandBuilder.ftList()); } + @Override + public Mono> ftInfo(String index) { + return createMono(() -> searchCommandBuilder.ftInfo(index)); + } + @Override public Mono>> ftSyndump(String index) { return createMono(() -> searchCommandBuilder.ftSyndump(index)); diff --git 
a/src/main/java/io/lettuce/core/RediSearchCommandBuilder.java b/src/main/java/io/lettuce/core/RediSearchCommandBuilder.java index 91b1a914a1..9b45388993 100644 --- a/src/main/java/io/lettuce/core/RediSearchCommandBuilder.java +++ b/src/main/java/io/lettuce/core/RediSearchCommandBuilder.java @@ -22,18 +22,8 @@ import io.lettuce.core.protocol.Command; import io.lettuce.core.protocol.CommandArgs; import io.lettuce.core.protocol.CommandKeyword; -import io.lettuce.core.search.AggregateReplyParser; -import io.lettuce.core.search.AggregationReply; -import io.lettuce.core.search.HybridReply; -import io.lettuce.core.search.HybridReplyParser; - -import io.lettuce.core.search.SearchReply; -import io.lettuce.core.search.SearchReplyParser; -import io.lettuce.core.search.SpellCheckResult; -import io.lettuce.core.search.SpellCheckResultParser; -import io.lettuce.core.search.Suggestion; -import io.lettuce.core.search.SuggestionParser; -import io.lettuce.core.search.SynonymMapParser; +import io.lettuce.core.search.*; + import io.lettuce.core.search.arguments.AggregateArgs; import io.lettuce.core.search.arguments.CreateArgs; import io.lettuce.core.search.arguments.ExplainArgs; @@ -411,6 +401,20 @@ public Command ftExplain(String index, V query, ExplainArgs return createCommand(FT_EXPLAIN, new StatusOutput<>(codec), commandArgs); } + /** + * Return information and statistics about an index. + * + * @param index the index name + * @return an IndexInfo object containing index information and statistics + */ + public Command> ftInfo(String index) { + LettuceAssert.notNull(index, "Index must not be null"); + + CommandArgs commandArgs = new CommandArgs<>(codec).add(index); + + return createCommand(FT_INFO, new EncodedComplexOutput<>(codec, new IndexInfoParser<>(codec)), commandArgs); + } + /** * Return a list of all existing indexes. 
* diff --git a/src/main/java/io/lettuce/core/api/async/RediSearchAsyncCommands.java b/src/main/java/io/lettuce/core/api/async/RediSearchAsyncCommands.java index 40e7a354db..8083642a6f 100644 --- a/src/main/java/io/lettuce/core/api/async/RediSearchAsyncCommands.java +++ b/src/main/java/io/lettuce/core/api/async/RediSearchAsyncCommands.java @@ -12,6 +12,7 @@ import io.lettuce.core.annotations.Experimental; import io.lettuce.core.search.AggregationReply; import io.lettuce.core.search.HybridReply; +import io.lettuce.core.search.IndexInfo; import io.lettuce.core.search.SearchReply; import io.lettuce.core.search.SpellCheckResult; import io.lettuce.core.search.Suggestion; @@ -628,6 +629,56 @@ public interface RediSearchAsyncCommands { @Experimental RedisFuture> ftList(); + /** + * Return information and statistics about a search index. + * + *

+ * This command returns detailed information and statistics about a specified search index, including configuration, schema + * definition, memory usage, indexing progress, and performance metrics. + *

+ * + *

+ * The returned {@link IndexInfo} object contains various categories of information: + *

+ *
    + *
  • General: index_name, index_options, index_definition, attributes, num_docs, max_doc_id, num_terms, + * num_records
  • + *
  • Size statistics: inverted_sz_mb, vector_index_sz_mb, doc_table_size_mb, sortable_values_size_mb, + * key_table_size_mb, etc.
  • + *
  • Indexing statistics: hash_indexing_failures, total_indexing_time, indexing, percent_indexed, + * number_of_uses
  • + *
  • Garbage collection: bytes_collected, total_ms_run, total_cycles, average_cycle_time_ms, + * last_run_time_ms
  • + *
  • Cursor statistics: global_idle, global_total, index_capacity, index_total
  • + *
  • Dialect statistics: Usage counts for each query dialect (1-4)
  • + *
  • Error statistics: Indexing failures and errors per field
  • + *
+ * + *

+ * Key use cases: + *

+ *
    + *
  • Monitoring: Track index health, memory usage, and performance
  • + *
  • Debugging: Identify indexing failures and errors
  • + *
  • Capacity planning: Analyze memory consumption and growth trends
  • + *
  • Performance tuning: Review indexing time and garbage collection metrics
  • + *
+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name + * @return an IndexInfo object containing index information and statistics + * @since 6.8 + * @see FT.INFO + * @see #ftCreate(String, CreateArgs, List) + * @see #ftList() + * @see #ftDropindex(String) + */ + @Experimental + RedisFuture> ftInfo(String index); + /** * Dump synonym group contents. * diff --git a/src/main/java/io/lettuce/core/api/reactive/RediSearchReactiveCommands.java b/src/main/java/io/lettuce/core/api/reactive/RediSearchReactiveCommands.java index 24bf60ee84..add8d6caed 100644 --- a/src/main/java/io/lettuce/core/api/reactive/RediSearchReactiveCommands.java +++ b/src/main/java/io/lettuce/core/api/reactive/RediSearchReactiveCommands.java @@ -12,6 +12,7 @@ import io.lettuce.core.annotations.Experimental; import io.lettuce.core.search.AggregationReply; import io.lettuce.core.search.HybridReply; +import io.lettuce.core.search.IndexInfo; import io.lettuce.core.search.SearchReply; import io.lettuce.core.search.SpellCheckResult; import io.lettuce.core.search.Suggestion; @@ -630,6 +631,56 @@ public interface RediSearchReactiveCommands { @Experimental Flux ftList(); + /** + * Return information and statistics about a search index. + * + *

+ * This command returns detailed information and statistics about a specified search index, including configuration, schema + * definition, memory usage, indexing progress, and performance metrics. + *

+ * + *

+ * The returned {@link IndexInfo} object contains various categories of information: + *

+ *
    + *
  • General: index_name, index_options, index_definition, attributes, num_docs, max_doc_id, num_terms, + * num_records
  • + *
  • Size statistics: inverted_sz_mb, vector_index_sz_mb, doc_table_size_mb, sortable_values_size_mb, + * key_table_size_mb, etc.
  • + *
  • Indexing statistics: hash_indexing_failures, total_indexing_time, indexing, percent_indexed, + * number_of_uses
  • + *
  • Garbage collection: bytes_collected, total_ms_run, total_cycles, average_cycle_time_ms, + * last_run_time_ms
  • + *
  • Cursor statistics: global_idle, global_total, index_capacity, index_total
  • + *
  • Dialect statistics: Usage counts for each query dialect (1-4)
  • + *
  • Error statistics: Indexing failures and errors per field
  • + *
+ * + *

+ * Key use cases: + *

+ *
    + *
  • Monitoring: Track index health, memory usage, and performance
  • + *
  • Debugging: Identify indexing failures and errors
  • + *
  • Capacity planning: Analyze memory consumption and growth trends
  • + *
  • Performance tuning: Review indexing time and garbage collection metrics
  • + *
+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name + * @return an IndexInfo object containing index information and statistics + * @since 6.8 + * @see FT.INFO + * @see #ftCreate(String, CreateArgs, List) + * @see #ftList() + * @see #ftDropindex(String) + */ + @Experimental + Mono> ftInfo(String index); + /** * Dump synonym group contents. * diff --git a/src/main/java/io/lettuce/core/api/sync/RediSearchCommands.java b/src/main/java/io/lettuce/core/api/sync/RediSearchCommands.java index a1ee184d71..6cbc2f5486 100644 --- a/src/main/java/io/lettuce/core/api/sync/RediSearchCommands.java +++ b/src/main/java/io/lettuce/core/api/sync/RediSearchCommands.java @@ -12,6 +12,7 @@ import io.lettuce.core.annotations.Experimental; import io.lettuce.core.search.AggregationReply; import io.lettuce.core.search.HybridReply; +import io.lettuce.core.search.IndexInfo; import io.lettuce.core.search.SearchReply; import io.lettuce.core.search.SpellCheckResult; import io.lettuce.core.search.Suggestion; @@ -628,6 +629,56 @@ public interface RediSearchCommands { @Experimental List ftList(); + /** + * Return information and statistics about a search index. + * + *

+ * This command returns detailed information and statistics about a specified search index, including configuration, schema + * definition, memory usage, indexing progress, and performance metrics. + *

+ * + *

+ * The returned {@link IndexInfo} object contains various categories of information: + *

+ *
    + *
  • General: index_name, index_options, index_definition, attributes, num_docs, max_doc_id, num_terms, + * num_records
  • + *
  • Size statistics: inverted_sz_mb, vector_index_sz_mb, doc_table_size_mb, sortable_values_size_mb, + * key_table_size_mb, etc.
  • + *
  • Indexing statistics: hash_indexing_failures, total_indexing_time, indexing, percent_indexed, + * number_of_uses
  • + *
  • Garbage collection: bytes_collected, total_ms_run, total_cycles, average_cycle_time_ms, + * last_run_time_ms
  • + *
  • Cursor statistics: global_idle, global_total, index_capacity, index_total
  • + *
  • Dialect statistics: Usage counts for each query dialect (1-4)
  • + *
  • Error statistics: Indexing failures and errors per field
  • + *
+ * + *

+ * Key use cases: + *

+ *
    + *
  • Monitoring: Track index health, memory usage, and performance
  • + *
  • Debugging: Identify indexing failures and errors
  • + *
  • Capacity planning: Analyze memory consumption and growth trends
  • + *
  • Performance tuning: Review indexing time and garbage collection metrics
  • + *
+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name + * @return an IndexInfo object containing index information and statistics + * @since 6.8 + * @see FT.INFO + * @see #ftCreate(String, CreateArgs, List) + * @see #ftList() + * @see #ftDropindex(String) + */ + @Experimental + IndexInfo ftInfo(String index); + /** * Dump synonym group contents. * diff --git a/src/main/java/io/lettuce/core/protocol/CommandType.java b/src/main/java/io/lettuce/core/protocol/CommandType.java index de6d59788e..4ef3b97b99 100644 --- a/src/main/java/io/lettuce/core/protocol/CommandType.java +++ b/src/main/java/io/lettuce/core/protocol/CommandType.java @@ -120,10 +120,11 @@ public enum CommandType implements ProtocolKeyword { FT_AGGREGATE("FT.AGGREGATE"), FT_ALIASADD("FT.ALIASADD"), FT_ALIASDEL("FT.ALIASDEL"), FT_ALIASUPDATE( "FT.ALIASUPDATE"), FT_ALTER("FT.ALTER"), FT_CREATE("FT.CREATE"), FT_CURSOR("FT.CURSOR"), FT_DICTADD( "FT.DICTADD"), FT_DICTDEL("FT.DICTDEL"), FT_DICTDUMP("FT.DICTDUMP"), FT_DROPINDEX( - "FT.DROPINDEX"), FT_EXPLAIN("FT.EXPLAIN"), FT_HYBRID("FT.HYBRID"), FT_LIST("FT._LIST"), FT_SEARCH( - "FT.SEARCH"), FT_SPELLCHECK("FT.SPELLCHECK"), FT_SUGADD("FT.SUGADD"), FT_SUGDEL( - "FT.SUGDEL"), FT_SUGGET("FT.SUGGET"), FT_SUGLEN("FT.SUGLEN"), FT_SYNDUMP( - "FT.SYNDUMP"), FT_SYNUPDATE("FT.SYNUPDATE"), FT_TAGVALS("FT.TAGVALS"), + "FT.DROPINDEX"), FT_EXPLAIN("FT.EXPLAIN"), FT_HYBRID("FT.HYBRID"), FT_INFO("FT.INFO"), FT_LIST( + "FT._LIST"), FT_SEARCH("FT.SEARCH"), FT_SPELLCHECK("FT.SPELLCHECK"), FT_SUGADD( + "FT.SUGADD"), FT_SUGDEL("FT.SUGDEL"), FT_SUGGET("FT.SUGGET"), FT_SUGLEN( + "FT.SUGLEN"), FT_SYNDUMP("FT.SYNDUMP"), FT_SYNUPDATE( + "FT.SYNUPDATE"), FT_TAGVALS("FT.TAGVALS"), // Others diff --git a/src/main/java/io/lettuce/core/search/IndexInfo.java b/src/main/java/io/lettuce/core/search/IndexInfo.java new file mode 100644 index 0000000000..5a8bcdbb8c --- /dev/null +++ b/src/main/java/io/lettuce/core/search/IndexInfo.java @@ -0,0 +1,1784 @@ +/* + * Copyright 2024, Redis Ltd. 
and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ +package io.lettuce.core.search; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +/** + * Represents the information and statistics returned by the FT.INFO command for a RediSearch index. + *

+ * This class encapsulates comprehensive details about a search index including its configuration, schema definition, memory + * usage, indexing progress, and performance metrics. The information is organized into several categories: + *

+ *
    + *
  • General information: index name, options, definition, schema attributes, document counts
  • + *
  • Size statistics: memory usage for various index components (inverted index, vectors, documents, + * etc.)
  • + *
  • Indexing statistics: indexing progress, failures, and timing information
  • + *
  • Garbage collection: GC cycles, bytes collected, and timing metrics
  • + *
  • Cursor statistics: information about active cursors for pagination
  • + *
  • Dialect statistics: usage counts for different query dialects
  • + *
  • Error statistics: indexing failures and errors per field
  • + *
+ * + * @param Value type. + * @author Julien Ruaux + * @see FT.INFO + * @since 6.8 + */ +public class IndexInfo { + + private String indexName; + + private boolean noOffsets; + + private boolean noHighlight; + + private boolean noFields; + + private boolean noFrequency; + + private boolean maxTextFields; + + private boolean skipInitialScan; + + private IndexDefinition indexDefinition = new IndexDefinition<>(); + + private final List> fields = new ArrayList<>(); + + private long numDocs; + + private long maxDocId; + + private long numTerms; + + private long numRecords; + + // Strongly-typed statistics objects + private SizeStatistics sizeStats = new SizeStatistics(); + + private IndexingStatistics indexingStats = new IndexingStatistics(); + + private GcStatistics gcStats = new GcStatistics(); + + private CursorStatistics cursorStats = new CursorStatistics(); + + private DialectStatistics dialectStats = new DialectStatistics(); + + private ErrorStatistics indexErrors = new ErrorStatistics(); + + private final List fieldStatistics = new ArrayList<>(); + + private final Map additionalFields = new HashMap<>(); + + /** + * Creates a new empty IndexInfo instance. + */ + public IndexInfo() { + } + + /** + * Gets the index name. + * + * @return the index name + */ + public String getIndexName() { + return indexName; + } + + void setIndexName(String indexName) { + this.indexName = indexName; + } + + /** + * Returns whether the index was created with NOOFFSETS option. If set, term offsets are not stored for documents (saves + * memory, does not allow exact searches or highlighting). + * + * @return {@code true} if NOOFFSETS is set + */ + public boolean isNoOffsets() { + return noOffsets; + } + + void setNoOffsets(boolean noOffsets) { + this.noOffsets = noOffsets; + } + + /** + * Returns whether the index was created with NOHL (no highlighting) option. Conserves storage space and memory by disabling + * highlighting support. Also implied by NOOFFSETS. 
+ * + * @return {@code true} if NOHL is set + */ + public boolean isNoHighlight() { + return noHighlight; + } + + void setNoHighlight(boolean noHighlight) { + this.noHighlight = noHighlight; + } + + /** + * Returns whether the index was created with NOFIELDS option. If set, field bits are not stored for each term (saves + * memory, does not allow filtering by specific fields). + * + * @return {@code true} if NOFIELDS is set + */ + public boolean isNoFields() { + return noFields; + } + + void setNoFields(boolean noFields) { + this.noFields = noFields; + } + + /** + * Returns whether the index was created with NOFREQS option. If set, term frequencies are not saved in the index (saves + * memory, does not allow sorting based on term frequencies). + * + * @return {@code true} if NOFREQS is set + */ + public boolean isNoFrequency() { + return noFrequency; + } + + void setNoFrequency(boolean noFrequency) { + this.noFrequency = noFrequency; + } + + /** + * Returns whether the index was created with MAXTEXTFIELDS option. Forces RediSearch to encode indexes as if there were + * more than 32 text fields, which allows adding additional fields (beyond 32) using FT.ALTER. + * + * @return {@code true} if MAXTEXTFIELDS is set + */ + public boolean isMaxTextFields() { + return maxTextFields; + } + + void setMaxTextFields(boolean maxTextFields) { + this.maxTextFields = maxTextFields; + } + + /** + * Returns whether the index was created with SKIPINITIALSCAN option. If set, the index was not scanned for existing + * documents in the keyspace upon creation. + * + * @return {@code true} if SKIPINITIALSCAN is set + */ + public boolean isSkipInitialScan() { + return skipInitialScan; + } + + void setSkipInitialScan(boolean skipInitialScan) { + this.skipInitialScan = skipInitialScan; + } + + /** + * Gets the index definition including key_type (HASH or JSON), prefixes, filters, and other configuration. 
+ * + * @return the index definition (never null, but individual fields may be empty/null if not available) + */ + public IndexDefinition getIndexDefinition() { + return indexDefinition; + } + + void setIndexDefinition(IndexDefinition indexDefinition) { + this.indexDefinition = indexDefinition; + } + + /** + * Gets the index schema fields (field names, types, and attributes). + * + * @return an unmodifiable list of fields + */ + public List> getFields() { + return Collections.unmodifiableList(fields); + } + + void addField(Field field) { + this.fields.add(field); + } + + /** + * Gets the number of documents in the index. + * + * @return the number of documents + */ + public long getNumDocs() { + return numDocs; + } + + void setNumDocs(long numDocs) { + this.numDocs = numDocs; + } + + /** + * Gets the maximum document ID. + * + * @return the maximum document ID + */ + public long getMaxDocId() { + return maxDocId; + } + + void setMaxDocId(long maxDocId) { + this.maxDocId = maxDocId; + } + + /** + * Gets the number of distinct terms in the index. + * + * @return the number of distinct terms + */ + public long getNumTerms() { + return numTerms; + } + + void setNumTerms(long numTerms) { + this.numTerms = numTerms; + } + + /** + * Gets the total number of records in the index. + * + * @return the total number of records + */ + public long getNumRecords() { + return numRecords; + } + + void setNumRecords(long numRecords) { + this.numRecords = numRecords; + } + + /** + * Gets dialect usage statistics showing how many times each query dialect (1-4) was used. + * + * @return dialect statistics (never null, but individual fields may be null if not available) + */ + public DialectStatistics getDialectStats() { + return dialectStats; + } + + void setDialectStats(DialectStatistics dialectStats) { + this.dialectStats = dialectStats; + } + + /** + * Gets index-level error statistics including indexing failures and last error information. 
+ * + * @return error statistics (never null, but individual fields may be null if not available) + */ + public ErrorStatistics getIndexErrors() { + return indexErrors; + } + + void setIndexErrors(ErrorStatistics indexErrors) { + this.indexErrors = indexErrors; + } + + /** + * Gets per-field error statistics. + *

+ * Each entry contains field information and error statistics for that field. + *

+ * + * @return an unmodifiable list of field error statistics + */ + public List getFieldStatistics() { + return Collections.unmodifiableList(fieldStatistics); + } + + void addFieldStatistic(FieldErrorStatistics fieldStatistic) { + this.fieldStatistics.add(fieldStatistic); + } + + /** + * Gets additional fields that were returned by FT.INFO but are not explicitly mapped to known properties. + *

+ * This map captures any fields that are not recognized by the current version of the parser, making the implementation + * forward-compatible with future Redis versions that may add new fields to the FT.INFO response. + *

+ * + * @return an unmodifiable map of additional fields + */ + public Map getAdditionalFields() { + return Collections.unmodifiableMap(additionalFields); + } + + void putAdditionalField(String key, Object value) { + this.additionalFields.put(key, value); + } + + // ========== Strongly-Typed Statistics Accessors ========== + + /** + * Gets size statistics including memory usage for various index components. + * + * @return size statistics (never null, but individual fields may be null if not available) + */ + public SizeStatistics getSizeStats() { + return sizeStats; + } + + void setSizeStats(SizeStatistics sizeStats) { + this.sizeStats = sizeStats; + } + + /** + * Gets indexing-related statistics including progress, failures, and timing. + * + * @return indexing statistics (never null, but individual fields may be null if not available) + */ + public IndexingStatistics getIndexingStats() { + return indexingStats; + } + + void setIndexingStats(IndexingStatistics indexingStats) { + this.indexingStats = indexingStats; + } + + /** + * Gets garbage collection statistics. + * + * @return GC statistics (never null, but individual fields may be null if not available) + */ + public GcStatistics getGcStats() { + return gcStats; + } + + void setGcStats(GcStatistics gcStats) { + this.gcStats = gcStats; + } + + /** + * Gets cursor statistics for pagination. + * + * @return cursor statistics (never null, but individual fields may be null if not available) + */ + public CursorStatistics getCursorStats() { + return cursorStats; + } + + void setCursorStats(CursorStatistics cursorStats) { + this.cursorStats = cursorStats; + } + + @Override + public String toString() { + return "IndexInfo{" + "indexName='" + indexName + '\'' + ", numDocs=" + numDocs + ", maxDocId=" + maxDocId + + ", numTerms=" + numTerms + ", numRecords=" + numRecords + '}'; + } + + // ========== Nested Classes ========== + + /** + * Size statistics including memory usage for various index components. 
+ * + * @since 6.8 + */ + public static class SizeStatistics { + + private double invertedSizeMb; + + private double vectorIndexSizeMb; + + private long totalInvertedIndexBlocks; + + private double offsetVectorsSizeMb; + + private double docTableSizeMb; + + private double sortableValuesSizeMb; + + private double keyTableSizeMb; + + private double geoshapesSizeMb; + + private double recordsPerDocAvg; + + private double bytesPerRecordAvg; + + private double offsetsPerTermAvg; + + private double offsetBitsPerRecordAvg; + + private double tagOverheadSizeMb; + + private double textOverheadSizeMb; + + private double totalIndexMemorySizeMb; + + SizeStatistics() { + } + + /** + * Gets the memory used by the inverted index in megabytes. + * + * @return the inverted index size in MB + */ + public double getInvertedSizeMb() { + return invertedSizeMb; + } + + /** + * Gets the memory used by vector indexes in megabytes. + * + * @return the vector index size in MB + */ + public double getVectorIndexSizeMb() { + return vectorIndexSizeMb; + } + + /** + * Gets the total number of blocks in the inverted index. + * + * @return the total inverted index blocks + */ + public long getTotalInvertedIndexBlocks() { + return totalInvertedIndexBlocks; + } + + /** + * Gets the memory used by offset vectors in megabytes. + * + * @return the offset vectors size in MB + */ + public double getOffsetVectorsSizeMb() { + return offsetVectorsSizeMb; + } + + /** + * Gets the memory used by the document table in megabytes. + * + * @return the document table size in MB + */ + public double getDocTableSizeMb() { + return docTableSizeMb; + } + + /** + * Gets the memory used by sortable values in megabytes. + * + * @return the sortable values size in MB + */ + public double getSortableValuesSizeMb() { + return sortableValuesSizeMb; + } + + /** + * Gets the memory used by the key table in megabytes. 
+ * + * @return the key table size in MB + */ + public double getKeyTableSizeMb() { + return keyTableSizeMb; + } + + /** + * Gets the memory used by GEO-related fields in megabytes. + * + * @return the geoshapes size in MB + */ + public double getGeoshapesSizeMb() { + return geoshapesSizeMb; + } + + /** + * Gets the average number of records per document. + * + * @return the average records per document + */ + public double getRecordsPerDocAvg() { + return recordsPerDocAvg; + } + + /** + * Gets the average size of each record in bytes. + * + * @return the average bytes per record + */ + public double getBytesPerRecordAvg() { + return bytesPerRecordAvg; + } + + /** + * Gets the average number of offsets per term. + * + * @return the average offsets per term + */ + public double getOffsetsPerTermAvg() { + return offsetsPerTermAvg; + } + + /** + * Gets the average number of bits used for offsets per record. + * + * @return the average offset bits per record + */ + public double getOffsetBitsPerRecordAvg() { + return offsetBitsPerRecordAvg; + } + + /** + * Gets the size of TAG index structures used for optimizing performance in megabytes. + * + * @return the TAG overhead size in MB + */ + public double getTagOverheadSizeMb() { + return tagOverheadSizeMb; + } + + /** + * Gets the size of TEXT index structures used for optimizing performance in megabytes. + * + * @return the TEXT overhead size in MB + */ + public double getTextOverheadSizeMb() { + return textOverheadSizeMb; + } + + /** + * Gets the total memory consumed by all indexes in the database in megabytes. 
+ * + * @return the total index memory size in MB + */ + public double getTotalIndexMemorySizeMb() { + return totalIndexMemorySizeMb; + } + + void setInvertedSizeMb(double invertedSizeMb) { + this.invertedSizeMb = invertedSizeMb; + } + + void setVectorIndexSizeMb(double vectorIndexSizeMb) { + this.vectorIndexSizeMb = vectorIndexSizeMb; + } + + void setTotalInvertedIndexBlocks(long totalInvertedIndexBlocks) { + this.totalInvertedIndexBlocks = totalInvertedIndexBlocks; + } + + void setOffsetVectorsSizeMb(double offsetVectorsSizeMb) { + this.offsetVectorsSizeMb = offsetVectorsSizeMb; + } + + void setDocTableSizeMb(double docTableSizeMb) { + this.docTableSizeMb = docTableSizeMb; + } + + void setSortableValuesSizeMb(double sortableValuesSizeMb) { + this.sortableValuesSizeMb = sortableValuesSizeMb; + } + + void setKeyTableSizeMb(double keyTableSizeMb) { + this.keyTableSizeMb = keyTableSizeMb; + } + + void setGeoshapesSizeMb(double geoshapesSizeMb) { + this.geoshapesSizeMb = geoshapesSizeMb; + } + + void setRecordsPerDocAvg(double recordsPerDocAvg) { + this.recordsPerDocAvg = recordsPerDocAvg; + } + + void setBytesPerRecordAvg(double bytesPerRecordAvg) { + this.bytesPerRecordAvg = bytesPerRecordAvg; + } + + void setOffsetsPerTermAvg(double offsetsPerTermAvg) { + this.offsetsPerTermAvg = offsetsPerTermAvg; + } + + void setOffsetBitsPerRecordAvg(double offsetBitsPerRecordAvg) { + this.offsetBitsPerRecordAvg = offsetBitsPerRecordAvg; + } + + void setTagOverheadSizeMb(double tagOverheadSizeMb) { + this.tagOverheadSizeMb = tagOverheadSizeMb; + } + + void setTextOverheadSizeMb(double textOverheadSizeMb) { + this.textOverheadSizeMb = textOverheadSizeMb; + } + + void setTotalIndexMemorySizeMb(double totalIndexMemorySizeMb) { + this.totalIndexMemorySizeMb = totalIndexMemorySizeMb; + } + + } + + /** + * Indexing-related statistics including progress, failures, and timing. 
+ * + * @since 6.8 + */ + public static class IndexingStatistics { + + private long hashIndexingFailures; + + private double totalIndexingTime; + + private boolean indexing; + + private double percentIndexed; + + private long numberOfUses; + + private boolean cleaning; + + IndexingStatistics() { + } + + /** + * Gets the number of failures encountered during indexing. + * + * @return the number of indexing failures + */ + public long getHashIndexingFailures() { + return hashIndexingFailures; + } + + /** + * Gets the cumulative wall-clock time spent indexing documents in milliseconds. + * + * @return the total indexing time in ms + */ + public double getTotalIndexingTime() { + return totalIndexingTime; + } + + /** + * Returns whether the index is currently being generated. + * + * @return {@code true} if indexing is in progress, {@code false} otherwise + */ + public boolean isIndexing() { + return indexing; + } + + /** + * Gets the percentage of the index that has been successfully generated (1.0 means 100%). + * + * @return the percent indexed (0.0-1.0) + */ + public double getPercentIndexed() { + return percentIndexed; + } + + /** + * Gets the number of times the index has been used. + * + * @return the number of uses + */ + public long getNumberOfUses() { + return numberOfUses; + } + + /** + * Returns whether index deletion is in progress. 
+ * + * @return {@code true} if cleaning is in progress, {@code false} otherwise + */ + public boolean isCleaning() { + return cleaning; + } + + void setHashIndexingFailures(long hashIndexingFailures) { + this.hashIndexingFailures = hashIndexingFailures; + } + + void setTotalIndexingTime(double totalIndexingTime) { + this.totalIndexingTime = totalIndexingTime; + } + + void setIndexing(boolean indexing) { + this.indexing = indexing; + } + + void setPercentIndexed(double percentIndexed) { + this.percentIndexed = percentIndexed; + } + + void setNumberOfUses(long numberOfUses) { + this.numberOfUses = numberOfUses; + } + + void setCleaning(boolean cleaning) { + this.cleaning = cleaning; + } + + } + + /** + * Garbage collection statistics. + * + * @since 6.8 + */ + public static class GcStatistics { + + private long bytesCollected; + + private double totalMsRun; + + private long totalCycles; + + private double averageCycleTimeMs; + + private double lastRunTimeMs; + + private long gcNumericTreesMissed; + + private long gcBlocksDenied; + + GcStatistics() { + } + + /** + * Gets the number of bytes collected during garbage collection. + * + * @return the bytes collected + */ + public long getBytesCollected() { + return bytesCollected; + } + + /** + * Gets the total time in milliseconds spent on garbage collection. + * + * @return the total GC time in ms + */ + public double getTotalMsRun() { + return totalMsRun; + } + + /** + * Gets the total number of garbage collection cycles. + * + * @return the total GC cycles + */ + public long getTotalCycles() { + return totalCycles; + } + + /** + * Gets the average time in milliseconds for each garbage collection cycle. + * + * @return the average GC cycle time in ms (may be NaN) + */ + public double getAverageCycleTimeMs() { + return averageCycleTimeMs; + } + + /** + * Gets the time in milliseconds taken by the last garbage collection run. 
+ * + * @return the last GC run time in ms + */ + public double getLastRunTimeMs() { + return lastRunTimeMs; + } + + /** + * Gets the number of numeric tree nodes whose changes were discarded during garbage collection. + * + * @return the number of numeric trees missed + */ + public long getGcNumericTreesMissed() { + return gcNumericTreesMissed; + } + + /** + * Gets the number of blocks whose changes were discarded during garbage collection. + * + * @return the number of blocks denied + */ + public long getGcBlocksDenied() { + return gcBlocksDenied; + } + + void setBytesCollected(long bytesCollected) { + this.bytesCollected = bytesCollected; + } + + void setTotalMsRun(double totalMsRun) { + this.totalMsRun = totalMsRun; + } + + void setTotalCycles(long totalCycles) { + this.totalCycles = totalCycles; + } + + void setAverageCycleTimeMs(double averageCycleTimeMs) { + this.averageCycleTimeMs = averageCycleTimeMs; + } + + void setLastRunTimeMs(double lastRunTimeMs) { + this.lastRunTimeMs = lastRunTimeMs; + } + + void setGcNumericTreesMissed(long gcNumericTreesMissed) { + this.gcNumericTreesMissed = gcNumericTreesMissed; + } + + void setGcBlocksDenied(long gcBlocksDenied) { + this.gcBlocksDenied = gcBlocksDenied; + } + + } + + /** + * Cursor statistics for pagination. + * + * @since 6.8 + */ + public static class CursorStatistics { + + private long globalIdle; + + private long globalTotal; + + private long indexCapacity; + + private long indexTotal; + + CursorStatistics() { + } + + /** + * Gets the number of idle cursors in the system. + * + * @return the global idle cursor count + */ + public long getGlobalIdle() { + return globalIdle; + } + + /** + * Gets the total number of cursors in the system. + * + * @return the global total cursor count + */ + public long getGlobalTotal() { + return globalTotal; + } + + /** + * Gets the maximum number of cursors allowed per index. 
+ * + * @return the index capacity + */ + public long getIndexCapacity() { + return indexCapacity; + } + + /** + * Gets the total number of cursors open on this index. + * + * @return the index total cursor count + */ + public long getIndexTotal() { + return indexTotal; + } + + void setGlobalIdle(long globalIdle) { + this.globalIdle = globalIdle; + } + + void setGlobalTotal(long globalTotal) { + this.globalTotal = globalTotal; + } + + void setIndexCapacity(long indexCapacity) { + this.indexCapacity = indexCapacity; + } + + void setIndexTotal(long indexTotal) { + this.indexTotal = indexTotal; + } + + } + + /** + * Dialect usage statistics showing how many times each query dialect (1-4) was used. + * + * @since 6.8 + */ + public static class DialectStatistics { + + private long dialect1; + + private long dialect2; + + private long dialect3; + + private long dialect4; + + DialectStatistics() { + } + + /** + * Gets the number of times dialect 1 was used. + * + * @return the dialect 1 usage count + */ + public long getDialect1() { + return dialect1; + } + + void setDialect1(long dialect1) { + this.dialect1 = dialect1; + } + + /** + * Gets the number of times dialect 2 was used. + * + * @return the dialect 2 usage count + */ + public long getDialect2() { + return dialect2; + } + + void setDialect2(long dialect2) { + this.dialect2 = dialect2; + } + + /** + * Gets the number of times dialect 3 was used. + * + * @return the dialect 3 usage count + */ + public long getDialect3() { + return dialect3; + } + + void setDialect3(long dialect3) { + this.dialect3 = dialect3; + } + + /** + * Gets the number of times dialect 4 was used. + * + * @return the dialect 4 usage count + */ + public long getDialect4() { + return dialect4; + } + + void setDialect4(long dialect4) { + this.dialect4 = dialect4; + } + + } + + /** + * Error statistics including indexing failures and last error information. 
+ * + * @since 6.8 + */ + public static class ErrorStatistics { + + private long indexingFailures; + + private String lastIndexingError; + + private String lastIndexingErrorKey; + + ErrorStatistics() { + } + + /** + * Gets the number of indexing failures. + * + * @return the number of indexing failures + */ + public long getIndexingFailures() { + return indexingFailures; + } + + void setIndexingFailures(long indexingFailures) { + this.indexingFailures = indexingFailures; + } + + /** + * Gets the description of the last indexing error. + * + * @return the last indexing error message, or {@code null} if not available + */ + public String getLastIndexingError() { + return lastIndexingError; + } + + void setLastIndexingError(String lastIndexingError) { + this.lastIndexingError = lastIndexingError; + } + + /** + * Gets the key that caused the last indexing error. + * + * @return the last indexing error key, or {@code null} if not available + */ + public String getLastIndexingErrorKey() { + return lastIndexingErrorKey; + } + + void setLastIndexingErrorKey(String lastIndexingErrorKey) { + this.lastIndexingErrorKey = lastIndexingErrorKey; + } + + } + + /** + * Per-field error statistics containing field information and error details. + * + * @since 6.8 + */ + public static class FieldErrorStatistics { + + private String identifier; + + private String attribute; + + private ErrorStatistics errors; + + FieldErrorStatistics() { + } + + /** + * Gets the field identifier (e.g., JSON path or field name). + * + * @return the field identifier + */ + public String getIdentifier() { + return identifier; + } + + void setIdentifier(String identifier) { + this.identifier = identifier; + } + + /** + * Gets the attribute name used in queries. 
+ * + * @return the attribute name, or {@code null} if not available + */ + public String getAttribute() { + return attribute; + } + + void setAttribute(String attribute) { + this.attribute = attribute; + } + + /** + * Gets the error statistics for this field. + * + * @return the error statistics, or {@code null} if not available + */ + public ErrorStatistics getErrors() { + return errors; + } + + void setErrors(ErrorStatistics errors) { + this.errors = errors; + } + + } + + /** + * Represents the index definition returned by FT.INFO. + *

+ * Contains information about how the index was created, including the key type, prefixes, filters, and other configuration + * options. This mirrors the structure of {@link io.lettuce.core.search.arguments.CreateArgs} but is used for reading index + * information rather than building commands. + * + * @param Value type. + * @see FT.INFO + * @since 6.8 + */ + public static class IndexDefinition { + + /** + * Possible target types for the index. + */ + public enum TargetType { + HASH, JSON + } + + private TargetType keyType; + + private final List prefixes = new ArrayList<>(); + + private V filter; + + private V languageField; + + private V scoreField; + + private V payloadField; + + private double defaultScore; + + private V defaultLanguage; + + private final Map additionalFields = new LinkedHashMap<>(); + + /** + * Package-private constructor for IndexDefinition. + */ + IndexDefinition() { + } + + /** + * Get the key type (HASH or JSON). + * + * @return the key type, or {@code null} if not available + */ + public TargetType getKeyType() { + return keyType; + } + + void setKeyType(TargetType keyType) { + this.keyType = keyType; + } + + /** + * Get the list of key prefixes that the index applies to. + * + * @return an unmodifiable list of prefixes + */ + public List getPrefixes() { + return Collections.unmodifiableList(prefixes); + } + + void addPrefix(V prefix) { + this.prefixes.add(prefix); + } + + /** + * Get the filter expression used to select which keys to index. + * + * @return the filter expression, or {@code null} if not available + */ + public V getFilter() { + return filter; + } + + void setFilter(V filter) { + this.filter = filter; + } + + /** + * Get the language field name. + * + * @return the language field name, or {@code null} if not available + */ + public V getLanguageField() { + return languageField; + } + + void setLanguageField(V languageField) { + this.languageField = languageField; + } + + /** + * Get the score field name. 
+ * + * @return the score field name, or {@code null} if not available + */ + public V getScoreField() { + return scoreField; + } + + void setScoreField(V scoreField) { + this.scoreField = scoreField; + } + + /** + * Get the payload field name. + * + * @return the payload field name, or {@code null} if not available + */ + public V getPayloadField() { + return payloadField; + } + + void setPayloadField(V payloadField) { + this.payloadField = payloadField; + } + + /** + * Get the default score for documents. + * + * @return the default score + */ + public double getDefaultScore() { + return defaultScore; + } + + void setDefaultScore(double defaultScore) { + this.defaultScore = defaultScore; + } + + /** + * Get the default language for text fields. + * + * @return the default language, or {@code null} if not available + */ + public V getDefaultLanguage() { + return defaultLanguage; + } + + void setDefaultLanguage(V defaultLanguage) { + this.defaultLanguage = defaultLanguage; + } + + /** + * Get additional fields that are not explicitly mapped. This is useful for forward compatibility with future Redis + * versions that may add new index definition properties. + * + * @return a map of additional properties + */ + public Map getAdditionalFields() { + return Collections.unmodifiableMap(additionalFields); + } + + void putAdditionalField(String key, Object value) { + this.additionalFields.put(key, value); + } + + } + + /** + * Base class for field information returned by FT.INFO. + *

+ * Represents a field in the index schema with its type and configuration. This mirrors the structure of + * {@link io.lettuce.core.search.arguments.FieldArgs} but is used for reading index information rather than building + * commands. + * + * @param Value type. + * @see Field and + * type options + * @since 6.8 + */ + public abstract static class Field { + + /** + * Field type enumeration. + */ + public enum FieldType { + TEXT, NUMERIC, TAG, GEO, GEOSHAPE, VECTOR + } + + private final V identifier; + + private final V attribute; + + private final FieldType type; + + private final boolean sortable; + + private final boolean unNormalizedForm; + + private final boolean noIndex; + + private final boolean indexEmpty; + + private final boolean indexMissing; + + private final Map additionalFields; + + /** + * Constructor for Field. + * + * @param identifier the field identifier (e.g., JSON path or field name) + * @param attribute the attribute name used in queries (may be null) + * @param type the field type + * @param sortable whether the field is sortable + * @param unNormalizedForm whether unnormalized form is used + * @param noIndex whether the field is not indexed + * @param indexEmpty whether empty values are indexed + * @param indexMissing whether missing values are indexed + * @param additionalFields additional fields not explicitly mapped + */ + protected Field(V identifier, V attribute, FieldType type, boolean sortable, boolean unNormalizedForm, boolean noIndex, + boolean indexEmpty, boolean indexMissing, Map additionalFields) { + this.identifier = identifier; + this.attribute = attribute; + this.type = type; + this.sortable = sortable; + this.unNormalizedForm = unNormalizedForm; + this.noIndex = noIndex; + this.indexEmpty = indexEmpty; + this.indexMissing = indexMissing; + this.additionalFields = new LinkedHashMap<>(additionalFields); + } + + /** + * Get the field identifier (e.g., JSON path or field name). 
+ * + * @return the field identifier + */ + public V getIdentifier() { + return identifier; + } + + /** + * Get the attribute name used in queries. + * + * @return the attribute name, or null if not set + */ + public V getAttribute() { + return attribute; + } + + /** + * Get the field type. + * + * @return the field type + */ + public FieldType getType() { + return type; + } + + /** + * Check if the field is sortable. + * + * @return true if sortable + */ + public boolean isSortable() { + return sortable; + } + + /** + * Check if the field uses unnormalized form. + * + * @return true if unnormalized form + */ + public boolean isUnNormalizedForm() { + return unNormalizedForm; + } + + /** + * Check if the field is not indexed. + * + * @return true if not indexed + */ + public boolean isNoIndex() { + return noIndex; + } + + /** + * Check if the field indexes empty values. + * + * @return true if indexes empty values + */ + public boolean isIndexEmpty() { + return indexEmpty; + } + + /** + * Check if the field indexes missing values. + * + * @return true if indexes missing values + */ + public boolean isIndexMissing() { + return indexMissing; + } + + /** + * Get additional fields that are not explicitly mapped. This is useful for forward compatibility with future Redis + * versions that may add new field properties. + * + * @return a map of additional properties + */ + public Map getAdditionalFields() { + return Collections.unmodifiableMap(additionalFields); + } + + } + + /** + * Represents a TEXT field in the index schema. + *

+ * Text fields are specifically designed for storing human language text. When indexing text fields, Redis performs several + * transformations to optimize search capabilities including lowercasing and tokenization. + * + * @param Value type. + * @see Text + * Fields + * @since 6.8 + */ + public static class TextField extends Field { + + private final Double weight; + + private final boolean noStem; + + private final String phonetic; + + private final boolean withSuffixTrie; + + /** + * Constructor for TextField. + * + * @param identifier the field identifier + * @param attribute the attribute name (may be null) + * @param sortable whether the field is sortable + * @param unNormalizedForm whether unnormalized form is used + * @param noIndex whether the field is not indexed + * @param indexEmpty whether empty values are indexed + * @param indexMissing whether missing values are indexed + * @param weight the field weight (may be null) + * @param noStem whether stemming is disabled + * @param phonetic the phonetic matcher (may be null) + * @param withSuffixTrie whether suffix trie is enabled + * @param additionalFields additional fields not explicitly mapped + */ + public TextField(V identifier, V attribute, boolean sortable, boolean unNormalizedForm, boolean noIndex, + boolean indexEmpty, boolean indexMissing, Double weight, boolean noStem, String phonetic, + boolean withSuffixTrie, Map additionalFields) { + super(identifier, attribute, FieldType.TEXT, sortable, unNormalizedForm, noIndex, indexEmpty, indexMissing, + additionalFields); + this.weight = weight; + this.noStem = noStem; + this.phonetic = phonetic; + this.withSuffixTrie = withSuffixTrie; + } + + /** + * Get the field weight. + * + * @return the weight, or null if not set + */ + public Double getWeight() { + return weight; + } + + /** + * Check if stemming is disabled. + * + * @return true if stemming is disabled + */ + public boolean isNoStem() { + return noStem; + } + + /** + * Get the phonetic matcher. 
+ * + * @return the phonetic matcher, or null if not set + */ + public String getPhonetic() { + return phonetic; + } + + /** + * Check if suffix trie is enabled. + * + * @return true if suffix trie is enabled + */ + public boolean isWithSuffixTrie() { + return withSuffixTrie; + } + + } + + /** + * Represents a NUMERIC field in the index schema. + *

+ * Numeric fields are used to store non-textual, countable values. They can hold integer or floating-point values and + * support range-based queries. + * + * @param Value type. + * @see Numeric + * Fields + * @since 6.8 + */ + public static class NumericField extends Field { + + /** + * Constructor for NumericField. + * + * @param identifier the field identifier + * @param attribute the attribute name (may be null) + * @param sortable whether the field is sortable + * @param unNormalizedForm whether unnormalized form is used + * @param noIndex whether the field is not indexed + * @param indexEmpty whether empty values are indexed + * @param indexMissing whether missing values are indexed + * @param additionalFields additional fields not explicitly mapped + */ + public NumericField(V identifier, V attribute, boolean sortable, boolean unNormalizedForm, boolean noIndex, + boolean indexEmpty, boolean indexMissing, Map additionalFields) { + super(identifier, attribute, FieldType.NUMERIC, sortable, unNormalizedForm, noIndex, indexEmpty, indexMissing, + additionalFields); + } + + } + + /** + * Represents a TAG field in the index schema. + *

+ * Tag fields are used to store textual data that represents a collection of data tags or labels. Unlike text fields, tag + * fields are stored as-is without tokenization or stemming. + * + * @param Value type. + * @see Tag + * Fields + * @since 6.8 + */ + public static class TagField extends Field { + + private final String separator; + + private final boolean caseSensitive; + + private final boolean withSuffixTrie; + + /** + * Constructor for TagField. + * + * @param identifier the field identifier + * @param attribute the attribute name (may be null) + * @param sortable whether the field is sortable + * @param unNormalizedForm whether unnormalized form is used + * @param noIndex whether the field is not indexed + * @param indexEmpty whether empty values are indexed + * @param indexMissing whether missing values are indexed + * @param separator the tag separator (may be null) + * @param caseSensitive whether case-sensitive + * @param withSuffixTrie whether suffix trie is enabled + * @param additionalFields additional fields not explicitly mapped + */ + public TagField(V identifier, V attribute, boolean sortable, boolean unNormalizedForm, boolean noIndex, + boolean indexEmpty, boolean indexMissing, String separator, boolean caseSensitive, boolean withSuffixTrie, + Map additionalFields) { + super(identifier, attribute, FieldType.TAG, sortable, unNormalizedForm, noIndex, indexEmpty, indexMissing, + additionalFields); + this.separator = separator; + this.caseSensitive = caseSensitive; + this.withSuffixTrie = withSuffixTrie; + } + + /** + * Get the tag separator. + * + * @return the separator, or null if not set + */ + public String getSeparator() { + return separator; + } + + /** + * Check if the field is case sensitive. + * + * @return true if case sensitive + */ + public boolean isCaseSensitive() { + return caseSensitive; + } + + /** + * Check if suffix trie is enabled. 
+ * + * @return true if suffix trie is enabled + */ + public boolean isWithSuffixTrie() { + return withSuffixTrie; + } + + } + + /** + * Represents a GEO field in the index schema. + *

+ * Geo fields are used to store geographical coordinates such as longitude and latitude, enabling geospatial radius queries. + * + * @param Value type. + * @see Geo + * Fields + * @since 6.8 + */ + public static class GeoField extends Field { + + /** + * Constructor for GeoField. + * + * @param identifier the field identifier + * @param attribute the attribute name (may be null) + * @param sortable whether the field is sortable + * @param unNormalizedForm whether unnormalized form is used + * @param noIndex whether the field is not indexed + * @param indexEmpty whether empty values are indexed + * @param indexMissing whether missing values are indexed + * @param additionalFields additional fields not explicitly mapped + */ + public GeoField(V identifier, V attribute, boolean sortable, boolean unNormalizedForm, boolean noIndex, + boolean indexEmpty, boolean indexMissing, Map additionalFields) { + super(identifier, attribute, FieldType.GEO, sortable, unNormalizedForm, noIndex, indexEmpty, indexMissing, + additionalFields); + } + + } + + /** + * Represents a GEOSHAPE field in the index schema. + *

+ * Geoshape fields provide more advanced functionality than GEO fields, supporting both points and shapes with geographical + * or Cartesian coordinates. + * + * @param Value type. + * @see Geoshape + * Fields + * @since 6.8 + */ + public static class GeoshapeField extends Field { + + /** + * Coordinate system for geoshape fields. + */ + public enum CoordinateSystem { + /** + * Cartesian (planar) coordinates. + */ + FLAT, + /** + * Spherical (geographical) coordinates. + */ + SPHERICAL + } + + private final CoordinateSystem coordinateSystem; + + /** + * Constructor for GeoshapeField. + * + * @param identifier the field identifier + * @param attribute the attribute name (may be null) + * @param sortable whether the field is sortable + * @param unNormalizedForm whether unnormalized form is used + * @param noIndex whether the field is not indexed + * @param indexEmpty whether empty values are indexed + * @param indexMissing whether missing values are indexed + * @param coordinateSystem the coordinate system (may be null) + * @param additionalFields additional fields not explicitly mapped + */ + public GeoshapeField(V identifier, V attribute, boolean sortable, boolean unNormalizedForm, boolean noIndex, + boolean indexEmpty, boolean indexMissing, CoordinateSystem coordinateSystem, + Map additionalFields) { + super(identifier, attribute, FieldType.GEOSHAPE, sortable, unNormalizedForm, noIndex, indexEmpty, indexMissing, + additionalFields); + this.coordinateSystem = coordinateSystem; + } + + /** + * Get the coordinate system. + * + * @return the coordinate system, or null if not set + */ + public CoordinateSystem getCoordinateSystem() { + return coordinateSystem; + } + + } + + /** + * Represents a VECTOR field in the index schema. + *

+ * Vector fields are floating-point vectors typically generated by external machine learning models, used for similarity + * search. + * + * @param Value type. + * @see Vector + * Fields + * @since 6.8 + */ + public static class VectorField extends Field { + + /** + * Vector similarity index algorithms. + */ + public enum Algorithm { + /** + * Brute force algorithm. + */ + FLAT, + /** + * Hierarchical, navigable, small world algorithm. + */ + HNSW, + /** + * SVS-VAMANA algorithm for high-performance approximate vector search. + * + * @since Redis 8.2 + */ + SVS_VAMANA + } + + private final Algorithm algorithm; + + private final Map attributes; + + /** + * Constructor for VectorField. + * + * @param identifier the field identifier + * @param attribute the attribute name (may be null) + * @param sortable whether the field is sortable + * @param unNormalizedForm whether unnormalized form is used + * @param noIndex whether the field is not indexed + * @param indexEmpty whether empty values are indexed + * @param indexMissing whether missing values are indexed + * @param algorithm the vector algorithm (may be null) + * @param attributes vector-specific attributes (DIM, DISTANCE_METRIC, TYPE, etc.) + * @param additionalFields additional fields not explicitly mapped + */ + public VectorField(V identifier, V attribute, boolean sortable, boolean unNormalizedForm, boolean noIndex, + boolean indexEmpty, boolean indexMissing, Algorithm algorithm, Map attributes, + Map additionalFields) { + super(identifier, attribute, FieldType.VECTOR, sortable, unNormalizedForm, noIndex, indexEmpty, indexMissing, + additionalFields); + this.algorithm = algorithm; + this.attributes = new LinkedHashMap<>(attributes); + } + + /** + * Get the vector algorithm. + * + * @return the algorithm, or null if not set + */ + public Algorithm getAlgorithm() { + return algorithm; + } + + /** + * Get the vector attributes (DIM, DISTANCE_METRIC, TYPE, etc.). 
+ * + * @return a map of vector attributes + */ + public Map getAttributes() { + return Collections.unmodifiableMap(attributes); + } + + } + +} diff --git a/src/main/java/io/lettuce/core/search/IndexInfoParser.java b/src/main/java/io/lettuce/core/search/IndexInfoParser.java new file mode 100644 index 0000000000..97964019d8 --- /dev/null +++ b/src/main/java/io/lettuce/core/search/IndexInfoParser.java @@ -0,0 +1,1038 @@ +/* + * Copyright 2025, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ +package io.lettuce.core.search; + +import io.lettuce.core.codec.RedisCodec; +import io.lettuce.core.codec.StringCodec; +import io.lettuce.core.internal.LettuceAssert; +import io.lettuce.core.output.ComplexData; +import io.lettuce.core.output.ComplexDataParser; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.*; + +/** + * Parser for FT.INFO command results that handles both RESP2 and RESP3 protocol responses. + * + *

+ * This parser automatically detects the Redis protocol version and switches between RESP2 and RESP3 parsing strategies. + *

+ * + *

+ * The result is an {@link IndexInfo} object containing index information and statistics. The structure includes various metrics + * about the index such as number of documents, memory usage, indexing statistics, and field definitions. + *

+ * + * @param Key type. + * @param Value type. + * @author Julien Ruaux + * @since 6.8 + */ +public class IndexInfoParser implements ComplexDataParser> { + + private final RedisCodec codec; + + // Known field names for categorization + private static final Set SIZE_STATS = new HashSet<>(Arrays.asList("inverted_sz_mb", "vector_index_sz_mb", + "total_inverted_index_blocks", "offset_vectors_sz_mb", "doc_table_size_mb", "sortable_values_size_mb", + "key_table_size_mb", "geoshapes_sz_mb", "records_per_doc_avg", "bytes_per_record_avg", "offsets_per_term_avg", + "offset_bits_per_record_avg", "tag_overhead_sz_mb", "text_overhead_sz_mb", "total_index_memory_sz_mb")); + + private static final Set INDEXING_STATS = new HashSet<>(Arrays.asList("hash_indexing_failures", + "total_indexing_time", "indexing", "percent_indexed", "number_of_uses", "cleaning")); + + public IndexInfoParser(RedisCodec codec) { + LettuceAssert.notNull(codec, "Codec must not be null"); + this.codec = codec; + } + + /** + * Parse the FT.INFO response data, automatically detecting RESP2 vs RESP3 format. + * + * @param data the response data from Redis + * @return an IndexInfo object containing index information and statistics + */ + @Override + public IndexInfo parse(ComplexData data) { + if (data == null) { + return new IndexInfo<>(); + } + + if (data.isList()) { + return parseResp2(data); + } + + return parseResp3(data); + } + + /** + * Parse FT.INFO response in RESP2 format (array-based with alternating key-value pairs). 
+ */ + private IndexInfo parseResp2(ComplexData data) { + List infoArray = data.getDynamicList(); + IndexInfo indexInfo = new IndexInfo<>(); + Map sizeStats = new HashMap<>(); + Map indexingStats = new HashMap<>(); + Map gcStats = new HashMap<>(); + Map cursorStats = new HashMap<>(); + Map dialectStats = new HashMap<>(); + Map indexErrors = new HashMap<>(); + + // RESP2: Parse alternating key-value pairs + for (int i = 0; i < infoArray.size(); i += 2) { + if (i + 1 >= infoArray.size()) { + break; // Incomplete pair, skip + } + + String key = decodeStringAsString(infoArray.get(i)); + Object value = infoArray.get(i + 1); + populateIndexInfo(indexInfo, key, value, sizeStats, indexingStats, gcStats, cursorStats, dialectStats, indexErrors); + } + + buildStatisticsObjects(indexInfo, sizeStats, indexingStats, gcStats, cursorStats, dialectStats, indexErrors); + return indexInfo; + } + + /** + * Parse FT.INFO response in RESP3 format (native map structure). + */ + private IndexInfo parseResp3(ComplexData data) { + Map rawMap = data.getDynamicMap(); + IndexInfo indexInfo = new IndexInfo<>(); + Map sizeStats = new HashMap<>(); + Map indexingStats = new HashMap<>(); + Map gcStats = new HashMap<>(); + Map cursorStats = new HashMap<>(); + Map dialectStats = new HashMap<>(); + Map indexErrors = new HashMap<>(); + + for (Map.Entry entry : rawMap.entrySet()) { + String key = decodeStringAsString(entry.getKey()); + Object value = entry.getValue(); + populateIndexInfo(indexInfo, key, value, sizeStats, indexingStats, gcStats, cursorStats, dialectStats, indexErrors); + } + + buildStatisticsObjects(indexInfo, sizeStats, indexingStats, gcStats, cursorStats, dialectStats, indexErrors); + return indexInfo; + } + + /** + * Populate the IndexInfo object based on the key-value pair. 
+ */ + private void populateIndexInfo(IndexInfo indexInfo, String key, Object value, Map sizeStats, + Map indexingStats, Map gcStats, Map cursorStats, + Map dialectStats, Map indexErrors) { + switch (key) { + case "index_name": + indexInfo.setIndexName(decodeStringAsString(value)); + break; + case "index_options": + parseIndexOptions(indexInfo, value); + break; + case "index_definition": + parseIndexDefinition(indexInfo, value); + break; + case "attributes": + parseAttributes(indexInfo, value); + break; + case "num_docs": + indexInfo.setNumDocs(parseLong(value)); + break; + case "max_doc_id": + indexInfo.setMaxDocId(parseLong(value)); + break; + case "num_terms": + indexInfo.setNumTerms(parseLong(value)); + break; + case "num_records": + indexInfo.setNumRecords(parseLong(value)); + break; + case "gc_stats": + parseGcStats(gcStats, value); + break; + case "cursor_stats": + parseCursorStats(cursorStats, value); + break; + case "dialect_stats": + parseDialectStats(dialectStats, value); + break; + case "Index Errors": + parseIndexErrors(indexErrors, value); + break; + case "field statistics": + parseFieldStatistics(indexInfo, value); + break; + default: + // Categorize statistics fields + if (SIZE_STATS.contains(key)) { + sizeStats.put(key, parseValue(value)); + } else if (INDEXING_STATS.contains(key)) { + indexingStats.put(key, parseValue(value)); + } else { + // Store unrecognized fields for forward compatibility + indexInfo.putAdditionalField(key, parseValue(value)); + } + break; + } + } + + private void parseIndexOptions(IndexInfo indexInfo, Object value) { + List options = parseListValue(value); + for (Object option : options) { + String optionStr = decodeStringAsString(option).toUpperCase(); + switch (optionStr) { + case "NOOFFSETS": + indexInfo.setNoOffsets(true); + break; + case "NOHL": + indexInfo.setNoHighlight(true); + break; + case "NOFIELDS": + indexInfo.setNoFields(true); + break; + case "NOFREQS": + indexInfo.setNoFrequency(true); + break; + case 
"MAXTEXTFIELDS": + indexInfo.setMaxTextFields(true); + break; + case "SKIPINITIALSCAN": + indexInfo.setSkipInitialScan(true); + break; + // Ignore unknown options + } + } + } + + private void parseIndexDefinition(IndexInfo indexInfo, Object value) { + Map defMap = new LinkedHashMap<>(); + if (value instanceof ComplexData) { + ComplexData data = (ComplexData) value; + if (data.isList()) { + // RESP2: alternating key-value pairs + List list = data.getDynamicList(); + for (int i = 0; i < list.size(); i += 2) { + if (i + 1 < list.size()) { + // Don't call parseValue here - keep raw values for metadata fields + defMap.put(decodeStringAsString(list.get(i)), list.get(i + 1)); + } + } + } else if (data.isMap()) { + // RESP3: map + Map map = data.getDynamicMap(); + for (Map.Entry entry : map.entrySet()) { + // Don't call parseValue here - keep raw values for metadata fields + defMap.put(decodeStringAsString(entry.getKey()), entry.getValue()); + } + } + } + + // Create IndexDefinition from the map + IndexInfo.IndexDefinition indexDefinition = new IndexInfo.IndexDefinition<>(); + + String keyTypeStr = getString(defMap, "key_type"); + if (keyTypeStr != null) { + try { + indexDefinition.setKeyType(IndexInfo.IndexDefinition.TargetType.valueOf(keyTypeStr.toUpperCase())); + } catch (IllegalArgumentException e) { + // Unknown key type, leave as null + } + } + + Object prefixesObj = defMap.get("prefixes"); + if (prefixesObj instanceof ComplexData) { + ComplexData prefixesData = (ComplexData) prefixesObj; + List prefixesList = prefixesData.getDynamicList(); + for (Object prefix : prefixesList) { + indexDefinition.addPrefix(decodeValue(prefix)); + } + } else if (prefixesObj instanceof List) { + for (Object prefix : (List) prefixesObj) { + indexDefinition.addPrefix(decodeValue(prefix)); + } + } + + V filter = getValue(defMap, "filter"); + if (filter != null) { + indexDefinition.setFilter(filter); + } + + V languageField = getValue(defMap, "language_field"); + if (languageField != null) 
{ + indexDefinition.setLanguageField(languageField); + } + + V scoreField = getValue(defMap, "score_field"); + if (scoreField != null) { + indexDefinition.setScoreField(scoreField); + } + + V payloadField = getValue(defMap, "payload_field"); + if (payloadField != null) { + indexDefinition.setPayloadField(payloadField); + } + + Double defaultScore = getDouble(defMap, "default_score"); + if (defaultScore != null) { + indexDefinition.setDefaultScore(defaultScore); + } + + V defaultLanguage = getValue(defMap, "default_language"); + if (defaultLanguage != null) { + indexDefinition.setDefaultLanguage(defaultLanguage); + } + + // Collect additional fields + Map additionalFields = new LinkedHashMap<>(defMap); + additionalFields.remove("key_type"); + additionalFields.remove("prefixes"); + additionalFields.remove("filter"); + additionalFields.remove("language_field"); + additionalFields.remove("score_field"); + additionalFields.remove("payload_field"); + additionalFields.remove("default_score"); + additionalFields.remove("default_language"); + + for (Map.Entry entry : additionalFields.entrySet()) { + indexDefinition.putAdditionalField(entry.getKey(), entry.getValue()); + } + + indexInfo.setIndexDefinition(indexDefinition); + } + + private void parseAttributes(IndexInfo indexInfo, Object value) { + List attributesList = parseListValue(value); + for (Object attr : attributesList) { + Map attributeMap = new LinkedHashMap<>(); + if (attr instanceof List) { + // Handle case where parseListValue already parsed the ComplexData into a List + // RESP2: alternating key-value pairs + List list = (List) attr; + for (int i = 0; i < list.size(); i += 2) { + if (i + 1 < list.size()) { + String key = decodeStringAsString(list.get(i)); + Object val = list.get(i + 1); + // Handle special case where SORTABLE has a flag as its value (e.g., SORTABLE NOSTEM) + // In this case, we need to add both SORTABLE and the flag (NOSTEM) as separate keys + if ("SORTABLE".equals(key)) { + 
attributeMap.put(key, val); + String valStr = decodeStringAsString(val); + // Check if the value is a known flag + if (isFieldFlag(valStr)) { + attributeMap.put(valStr, valStr); + } + } else { + attributeMap.put(key, val); + } + } + } + } else if (attr instanceof Map) { + // Handle case where parseListValue already parsed the map + @SuppressWarnings("unchecked") + Map map = (Map) attr; + attributeMap.putAll(map); + } else if (attr instanceof ComplexData) { + ComplexData data = (ComplexData) attr; + if (data.isList()) { + // RESP2: alternating key-value pairs + List list = data.getDynamicList(); + for (int i = 0; i < list.size(); i += 2) { + if (i + 1 < list.size()) { + // Don't call parseValue - keep raw values for metadata fields + String key = decodeStringAsString(list.get(i)); + Object val = list.get(i + 1); + // Handle special case where SORTABLE has a flag as its value + if ("SORTABLE".equals(key)) { + attributeMap.put(key, val); + String valStr = decodeStringAsString(val); + if (isFieldFlag(valStr)) { + attributeMap.put(valStr, valStr); + } + } else { + attributeMap.put(key, val); + } + } + } + } else if (data.isMap()) { + // RESP3: map + Map map = data.getDynamicMap(); + for (Map.Entry entry : map.entrySet()) { + // Don't call parseValue - keep raw values for metadata fields + attributeMap.put(decodeStringAsString(entry.getKey()), entry.getValue()); + } + } + } + IndexInfo.Field field = createFieldFromMap(attributeMap); + if (field != null) { + indexInfo.addField(field); + } + } + } + + /** + * Check if a string is a known field flag. + */ + private boolean isFieldFlag(String str) { + return "NOSTEM".equals(str) || "UNF".equals(str) || "NOINDEX".equals(str) || "PHONETIC".equals(str) + || "WITHSUFFIXTRIE".equals(str) || "INDEXEMPTY".equals(str) || "INDEXMISSING".equals(str); + } + + /** + * Creates a Field object from the attribute map based on the field type. 
+ * + * @param attributeMap the map containing field attributes + * @return the appropriate Field subclass instance, or null if type is missing + */ + private IndexInfo.Field createFieldFromMap(Map attributeMap) { + // Extract common fields + V identifier = getValue(attributeMap, "identifier"); + V attribute = getValue(attributeMap, "attribute"); + String type = getString(attributeMap, "type"); + + if (identifier == null || type == null) { + return null; // Invalid field definition + } + + // Extract common boolean flags + boolean sortable = getBoolean(attributeMap, "SORTABLE"); + boolean unNormalizedForm = getBoolean(attributeMap, "UNF"); + boolean noIndex = getBoolean(attributeMap, "NOINDEX"); + boolean indexEmpty = getBoolean(attributeMap, "INDEXEMPTY"); + boolean indexMissing = getBoolean(attributeMap, "INDEXMISSING"); + + // Collect additional fields not explicitly mapped + Map additionalFields = new LinkedHashMap<>(attributeMap); + additionalFields.remove("identifier"); + additionalFields.remove("attribute"); + additionalFields.remove("type"); + additionalFields.remove("SORTABLE"); + additionalFields.remove("UNF"); + additionalFields.remove("NOINDEX"); + additionalFields.remove("INDEXEMPTY"); + additionalFields.remove("INDEXMISSING"); + + // Create the appropriate Field subclass based on type + switch (type.toUpperCase()) { + case "TEXT": + return createTextField(identifier, attribute, sortable, unNormalizedForm, noIndex, indexEmpty, indexMissing, + attributeMap, additionalFields); + case "NUMERIC": + return new IndexInfo.NumericField<>(identifier, attribute, sortable, unNormalizedForm, noIndex, indexEmpty, + indexMissing, additionalFields); + case "TAG": + return createTagField(identifier, attribute, sortable, unNormalizedForm, noIndex, indexEmpty, indexMissing, + attributeMap, additionalFields); + case "GEO": + return new IndexInfo.GeoField<>(identifier, attribute, sortable, unNormalizedForm, noIndex, indexEmpty, + indexMissing, additionalFields); + case 
"GEOSHAPE": + return createGeoshapeField(identifier, attribute, sortable, unNormalizedForm, noIndex, indexEmpty, indexMissing, + attributeMap, additionalFields); + case "VECTOR": + return createVectorField(identifier, attribute, sortable, unNormalizedForm, noIndex, indexEmpty, indexMissing, + attributeMap, additionalFields); + default: + // Unknown field type - store all data in additionalFields + additionalFields.put("type", type); + return new IndexInfo.NumericField<>(identifier, attribute, sortable, unNormalizedForm, noIndex, indexEmpty, + indexMissing, additionalFields); + } + } + + private IndexInfo.TextField createTextField(V identifier, V attribute, boolean sortable, boolean unNormalizedForm, + boolean noIndex, boolean indexEmpty, boolean indexMissing, Map attributeMap, + Map additionalFields) { + Double weight = parseDouble(attributeMap.get("WEIGHT")); + boolean noStem = getBoolean(attributeMap, "NOSTEM"); + String phonetic = getString(attributeMap, "PHONETIC"); + boolean withSuffixTrie = getBoolean(attributeMap, "WITHSUFFIXTRIE"); + + additionalFields.remove("WEIGHT"); + additionalFields.remove("NOSTEM"); + additionalFields.remove("PHONETIC"); + additionalFields.remove("WITHSUFFIXTRIE"); + + return new IndexInfo.TextField<>(identifier, attribute, sortable, unNormalizedForm, noIndex, indexEmpty, indexMissing, + weight, noStem, phonetic, withSuffixTrie, additionalFields); + } + + private IndexInfo.TagField createTagField(V identifier, V attribute, boolean sortable, boolean unNormalizedForm, + boolean noIndex, boolean indexEmpty, boolean indexMissing, Map attributeMap, + Map additionalFields) { + String separator = getString(attributeMap, "SEPARATOR"); + boolean caseSensitive = getBoolean(attributeMap, "CASESENSITIVE"); + boolean withSuffixTrie = getBoolean(attributeMap, "WITHSUFFIXTRIE"); + + additionalFields.remove("SEPARATOR"); + additionalFields.remove("CASESENSITIVE"); + additionalFields.remove("WITHSUFFIXTRIE"); + + return new 
IndexInfo.TagField<>(identifier, attribute, sortable, unNormalizedForm, noIndex, indexEmpty, indexMissing, + separator, caseSensitive, withSuffixTrie, additionalFields); + } + + private IndexInfo.GeoshapeField createGeoshapeField(V identifier, V attribute, boolean sortable, + boolean unNormalizedForm, boolean noIndex, boolean indexEmpty, boolean indexMissing, + Map attributeMap, Map additionalFields) { + String coordinateSystemStr = getString(attributeMap, "COORD_SYSTEM"); + IndexInfo.GeoshapeField.CoordinateSystem coordinateSystem = null; + if (coordinateSystemStr != null) { + try { + coordinateSystem = IndexInfo.GeoshapeField.CoordinateSystem.valueOf(coordinateSystemStr.toUpperCase()); + } catch (IllegalArgumentException e) { + // Unknown coordinate system, leave as null + } + } + + additionalFields.remove("COORD_SYSTEM"); + + return new IndexInfo.GeoshapeField<>(identifier, attribute, sortable, unNormalizedForm, noIndex, indexEmpty, + indexMissing, coordinateSystem, additionalFields); + } + + private IndexInfo.VectorField createVectorField(V identifier, V attribute, boolean sortable, boolean unNormalizedForm, + boolean noIndex, boolean indexEmpty, boolean indexMissing, Map attributeMap, + Map additionalFields) { + String algorithmStr = getString(attributeMap, "ALGORITHM"); + IndexInfo.VectorField.Algorithm algorithm = null; + if (algorithmStr != null) { + try { + // Handle SVS-VAMANA special case + String algName = algorithmStr.toUpperCase().replace("-", "_"); + algorithm = IndexInfo.VectorField.Algorithm.valueOf(algName); + } catch (IllegalArgumentException e) { + // Unknown algorithm, leave as null + } + } + + // Vector attributes are stored in a nested map + Map vectorAttributes = new LinkedHashMap<>(); + for (Map.Entry entry : attributeMap.entrySet()) { + String key = entry.getKey(); + // Skip common fields and algorithm + if (!key.equals("identifier") && !key.equals("attribute") && !key.equals("type") && !key.equals("ALGORITHM") + && !key.equals("SORTABLE") 
&& !key.equals("UNF") && !key.equals("NOINDEX") && !key.equals("INDEXEMPTY") + && !key.equals("INDEXMISSING")) { + vectorAttributes.put(key, entry.getValue()); + } + } + + additionalFields.remove("ALGORITHM"); + // Remove vector attributes from additionalFields as they're in vectorAttributes + for (String key : vectorAttributes.keySet()) { + additionalFields.remove(key); + } + + return new IndexInfo.VectorField<>(identifier, attribute, sortable, unNormalizedForm, noIndex, indexEmpty, indexMissing, + algorithm, vectorAttributes, additionalFields); + } + + private void parseGcStats(Map gcStats, Object value) { + if (value instanceof ComplexData) { + ComplexData data = (ComplexData) value; + if (data.isList()) { + // RESP2: alternating key-value pairs + List list = data.getDynamicList(); + for (int i = 0; i < list.size(); i += 2) { + if (i + 1 < list.size()) { + String key = decodeStringAsString(list.get(i)); + Object parsedValue = parseValue(list.get(i + 1)); + gcStats.put(key, parsedValue); + } + } + } else if (data.isMap()) { + // RESP3: map + Map map = data.getDynamicMap(); + for (Map.Entry entry : map.entrySet()) { + String key = decodeStringAsString(entry.getKey()); + Object parsedValue = parseValue(entry.getValue()); + gcStats.put(key, parsedValue); + } + } + } + } + + private void parseCursorStats(Map cursorStats, Object value) { + if (value instanceof ComplexData) { + ComplexData data = (ComplexData) value; + if (data.isList()) { + // RESP2: alternating key-value pairs + List list = data.getDynamicList(); + for (int i = 0; i < list.size(); i += 2) { + if (i + 1 < list.size()) { + String key = decodeStringAsString(list.get(i)); + Object parsedValue = parseValue(list.get(i + 1)); + cursorStats.put(key, parsedValue); + } + } + } else if (data.isMap()) { + // RESP3: map + Map map = data.getDynamicMap(); + for (Map.Entry entry : map.entrySet()) { + String key = decodeStringAsString(entry.getKey()); + Object parsedValue = parseValue(entry.getValue()); + 
cursorStats.put(key, parsedValue); + } + } + } + } + + private void parseDialectStats(Map dialectStats, Object value) { + if (value instanceof ComplexData) { + ComplexData data = (ComplexData) value; + if (data.isList()) { + // RESP2: alternating key-value pairs + List list = data.getDynamicList(); + for (int i = 0; i < list.size(); i += 2) { + if (i + 1 < list.size()) { + dialectStats.put(decodeStringAsString(list.get(i)), parseLong(list.get(i + 1))); + } + } + } else if (data.isMap()) { + // RESP3: map + Map map = data.getDynamicMap(); + for (Map.Entry entry : map.entrySet()) { + dialectStats.put(decodeStringAsString(entry.getKey()), parseLong(entry.getValue())); + } + } + } + } + + private void parseIndexErrors(Map indexErrors, Object value) { + if (value instanceof ComplexData) { + ComplexData data = (ComplexData) value; + if (data.isList()) { + // RESP2: alternating key-value pairs + List list = data.getDynamicList(); + for (int i = 0; i < list.size(); i += 2) { + if (i + 1 < list.size()) { + indexErrors.put(decodeStringAsString(list.get(i)), parseValue(list.get(i + 1))); + } + } + } else if (data.isMap()) { + // RESP3: map + Map map = data.getDynamicMap(); + for (Map.Entry entry : map.entrySet()) { + indexErrors.put(decodeStringAsString(entry.getKey()), parseValue(entry.getValue())); + } + } + } + } + + private void parseFieldStatistics(IndexInfo indexInfo, Object value) { + List fieldStatsList = parseListValue(value); + for (Object fieldStat : fieldStatsList) { + Map fieldStatMap = new LinkedHashMap<>(); + if (fieldStat instanceof ComplexData) { + ComplexData data = (ComplexData) fieldStat; + if (data.isList()) { + // RESP2: alternating key-value pairs + List list = data.getDynamicList(); + for (int i = 0; i < list.size(); i += 2) { + if (i + 1 < list.size()) { + fieldStatMap.put(decodeStringAsString(list.get(i)), parseValue(list.get(i + 1))); + } + } + } else if (data.isMap()) { + // RESP3: map + Map map = data.getDynamicMap(); + for (Map.Entry entry : 
map.entrySet()) { + fieldStatMap.put(decodeStringAsString(entry.getKey()), parseValue(entry.getValue())); + } + } + } + + // Build FieldErrorStatistics from the map + IndexInfo.FieldErrorStatistics fieldErrorStats = new IndexInfo.FieldErrorStatistics(); + + String identifier = (String) fieldStatMap.get("identifier"); + if (identifier != null) { + fieldErrorStats.setIdentifier(identifier); + } + + String attribute = (String) fieldStatMap.get("attribute"); + if (attribute != null) { + fieldErrorStats.setAttribute(attribute); + } + + Object indexErrorsObj = fieldStatMap.get("Index Errors"); + if (indexErrorsObj != null) { + Map errorsMap = new LinkedHashMap<>(); + if (indexErrorsObj instanceof ComplexData) { + ComplexData errorsData = (ComplexData) indexErrorsObj; + if (errorsData.isList()) { + List errorsList = errorsData.getDynamicList(); + for (int i = 0; i < errorsList.size(); i += 2) { + if (i + 1 < errorsList.size()) { + errorsMap.put(decodeStringAsString(errorsList.get(i)), parseValue(errorsList.get(i + 1))); + } + } + } else if (errorsData.isMap()) { + Map errorsRawMap = errorsData.getDynamicMap(); + for (Map.Entry entry : errorsRawMap.entrySet()) { + errorsMap.put(decodeStringAsString(entry.getKey()), parseValue(entry.getValue())); + } + } + } + IndexInfo.ErrorStatistics errors = buildErrorStatistics(errorsMap); + fieldErrorStats.setErrors(errors); + } + + indexInfo.addFieldStatistic(fieldErrorStats); + } + } + + /** + * Parse a value which can be a simple type, list, or nested map. + */ + private Object parseValue(Object value) { + if (value instanceof ByteBuffer) { + return decodeValue(value); + } else if (value instanceof ComplexData) { + ComplexData complexData = (ComplexData) value; + if (complexData.isList()) { + return parseList(complexData); + } else if (complexData.isMap()) { + return parseMap(complexData); + } + } else if (value instanceof Number) { + return value; + } + return value; + } + + /** + * Parse a list value. 
+ */ + private List parseList(ComplexData data) { + List list = data.getDynamicList(); + List result = new ArrayList<>(list.size()); + for (Object item : list) { + result.add(parseValue(item)); + } + return result; + } + + private List parseListValue(Object value) { + if (value instanceof ComplexData) { + return parseList((ComplexData) value); + } + return new ArrayList<>(); + } + + /** + * Parse a map value. + */ + private Map parseMap(ComplexData data) { + Map rawMap = data.getDynamicMap(); + Map result = new LinkedHashMap<>(); + for (Map.Entry entry : rawMap.entrySet()) { + String key = decodeStringAsString(entry.getKey()); + Object value = parseValue(entry.getValue()); + result.put(key, value); + } + return result; + } + + /** + * Build strongly-typed statistics objects from the collected maps. + */ + private void buildStatisticsObjects(IndexInfo indexInfo, Map sizeMap, Map indexingMap, + Map gcMap, Map cursorMap, Map dialectMap, + Map indexErrorsMap) { + // Build SizeStatistics + if (!sizeMap.isEmpty()) { + IndexInfo.SizeStatistics sizeStats = indexInfo.getSizeStats(); + sizeStats.setInvertedSizeMb(parseDouble(sizeMap.get("inverted_sz_mb"))); + sizeStats.setVectorIndexSizeMb(parseDouble(sizeMap.get("vector_index_sz_mb"))); + sizeStats.setTotalInvertedIndexBlocks(parseLong(sizeMap.get("total_inverted_index_blocks"))); + sizeStats.setOffsetVectorsSizeMb(parseDouble(sizeMap.get("offset_vectors_sz_mb"))); + sizeStats.setDocTableSizeMb(parseDouble(sizeMap.get("doc_table_size_mb"))); + sizeStats.setSortableValuesSizeMb(parseDouble(sizeMap.get("sortable_values_size_mb"))); + sizeStats.setKeyTableSizeMb(parseDouble(sizeMap.get("key_table_size_mb"))); + sizeStats.setGeoshapesSizeMb(parseDouble(sizeMap.get("geoshapes_sz_mb"))); + sizeStats.setRecordsPerDocAvg(parseDouble(sizeMap.get("records_per_doc_avg"))); + sizeStats.setBytesPerRecordAvg(parseDouble(sizeMap.get("bytes_per_record_avg"))); + 
sizeStats.setOffsetsPerTermAvg(parseDouble(sizeMap.get("offsets_per_term_avg"))); + sizeStats.setOffsetBitsPerRecordAvg(parseDouble(sizeMap.get("offset_bits_per_record_avg"))); + sizeStats.setTagOverheadSizeMb(parseDouble(sizeMap.get("tag_overhead_sz_mb"))); + sizeStats.setTextOverheadSizeMb(parseDouble(sizeMap.get("text_overhead_sz_mb"))); + sizeStats.setTotalIndexMemorySizeMb(parseDouble(sizeMap.get("total_index_memory_sz_mb"))); + } + + // Build IndexingStatistics + if (!indexingMap.isEmpty()) { + IndexInfo.IndexingStatistics indexingStats = indexInfo.getIndexingStats(); + indexingStats.setHashIndexingFailures(parseLong(indexingMap.get("hash_indexing_failures"))); + indexingStats.setTotalIndexingTime(parseDouble(indexingMap.get("total_indexing_time"))); + indexingStats.setIndexing(parseBoolean(indexingMap.get("indexing"))); + indexingStats.setPercentIndexed(parseDouble(indexingMap.get("percent_indexed"))); + indexingStats.setNumberOfUses(parseLong(indexingMap.get("number_of_uses"))); + indexingStats.setCleaning(parseBoolean(indexingMap.get("cleaning"))); + } + + // Build GcStatistics + if (!gcMap.isEmpty()) { + IndexInfo.GcStatistics gcStats = indexInfo.getGcStats(); + gcStats.setBytesCollected(parseLong(gcMap.get("bytes_collected"))); + gcStats.setTotalMsRun(parseDouble(gcMap.get("total_ms_run"))); + gcStats.setTotalCycles(parseLong(gcMap.get("total_cycles"))); + gcStats.setAverageCycleTimeMs(parseDouble(gcMap.get("average_cycle_time_ms"))); + gcStats.setLastRunTimeMs(parseDouble(gcMap.get("last_run_time_ms"))); + gcStats.setGcNumericTreesMissed(parseLong(gcMap.get("gc_numeric_trees_missed"))); + gcStats.setGcBlocksDenied(parseLong(gcMap.get("gc_blocks_denied"))); + } + + // Build CursorStatistics + if (!cursorMap.isEmpty()) { + IndexInfo.CursorStatistics cursorStats = indexInfo.getCursorStats(); + cursorStats.setGlobalIdle(parseLong(cursorMap.get("global_idle"))); + cursorStats.setGlobalTotal(parseLong(cursorMap.get("global_total"))); + 
cursorStats.setIndexCapacity(parseLong(cursorMap.get("index_capacity"))); + cursorStats.setIndexTotal(parseLong(cursorMap.get("index_total"))); + } + + // Build DialectStatistics + if (!dialectMap.isEmpty()) { + IndexInfo.DialectStatistics dialectStats = indexInfo.getDialectStats(); + dialectStats.setDialect1(parseLong(dialectMap.get("dialect_1"))); + dialectStats.setDialect2(parseLong(dialectMap.get("dialect_2"))); + dialectStats.setDialect3(parseLong(dialectMap.get("dialect_3"))); + dialectStats.setDialect4(parseLong(dialectMap.get("dialect_4"))); + } + + // Build ErrorStatistics for index errors + if (!indexErrorsMap.isEmpty()) { + IndexInfo.ErrorStatistics errorStats = indexInfo.getIndexErrors(); + errorStats.setIndexingFailures(parseLong(indexErrorsMap.get("indexing failures"))); + String lastError = parseString(indexErrorsMap.get("last indexing error")); + if (lastError != null) { + errorStats.setLastIndexingError(lastError); + } + String lastErrorKey = parseString(indexErrorsMap.get("last indexing error key")); + if (lastErrorKey != null) { + errorStats.setLastIndexingErrorKey(lastErrorKey); + } + } + } + + /** + * Build ErrorStatistics from a map. + */ + private IndexInfo.ErrorStatistics buildErrorStatistics(Map errorsMap) { + IndexInfo.ErrorStatistics errorStats = new IndexInfo.ErrorStatistics(); + errorStats.setIndexingFailures(parseLong(errorsMap.get("indexing failures"))); + String lastError = parseString(errorsMap.get("last indexing error")); + if (lastError != null) { + errorStats.setLastIndexingError(lastError); + } + String lastErrorKey = parseString(errorsMap.get("last indexing error key")); + if (lastErrorKey != null) { + errorStats.setLastIndexingErrorKey(lastErrorKey); + } + return errorStats; + } + + /** + * Parse a Long value from various types. 
+ */ + private long parseLong(Object value) { + if (value instanceof Number) { + return ((Number) value).longValue(); + } else if (value instanceof ByteBuffer) { + String str = decodeStringAsString(value); + try { + return Long.parseLong(str); + } catch (NumberFormatException e) { + return 0; + } + } else if (value instanceof String) { + try { + return Long.parseLong((String) value); + } catch (NumberFormatException e) { + return 0; + } + } + return 0; + } + + /** + * Parse a Double value from various types. + */ + private double parseDouble(Object value) { + if (value instanceof Number) { + return ((Number) value).doubleValue(); + } else if (value instanceof ByteBuffer) { + String str = decodeStringAsString(value); + try { + return Double.parseDouble(str); + } catch (NumberFormatException e) { + return 0; + } + } else if (value instanceof String) { + try { + return Double.parseDouble((String) value); + } catch (NumberFormatException e) { + return 0; + } + } + return 0; + } + + /** + * Parse a Boolean value from various types. Redis returns 0/1 for boolean values. + */ + private boolean parseBoolean(Object value) { + if (value instanceof Boolean) { + return (Boolean) value; + } + if (value instanceof Number) { + return ((Number) value).intValue() != 0; + } + if (value instanceof ByteBuffer) { + String str = decodeStringAsString(value); + try { + return Integer.parseInt(str) != 0; + } catch (NumberFormatException e) { + return Boolean.parseBoolean(str); + } + } + if (value instanceof String) { + try { + return Integer.parseInt((String) value) != 0; + } catch (NumberFormatException e) { + return Boolean.parseBoolean((String) value); + } + } + return false; + } + + /** + * Parse a String value from various types. + */ + private String parseString(Object value) { + if (value == null) { + return null; + } + if (value instanceof String) { + return (String) value; + } + return decodeStringAsString(value); + } + + /** + * Decode a ByteBuffer or other object to type V. 
+ */ + private V decodeValue(Object obj) { + if (obj instanceof ByteBuffer) { + return codec.decodeValue((ByteBuffer) obj); + } + return (V) obj; + } + + /** + * Decode a ByteBuffer or other object to a String. This is used for keys and metadata that should always be strings, + * regardless of the codec. + */ + private String decodeStringAsString(Object obj) { + if (obj instanceof ByteBuffer) { + return StringCodec.UTF8.decodeValue((ByteBuffer) obj); + } + if (obj instanceof byte[]) { + return new String((byte[]) obj, StandardCharsets.UTF_8); + } + return obj != null ? obj.toString() : null; + } + + /** + * Encode a String to type K using the codec. + */ + private K encodeKey(String str) { + if (str == null) { + return null; + } + ByteBuffer encoded = StringCodec.UTF8.encodeKey(str); + return codec.decodeKey(encoded); + } + + /** + * Get a value of type V from the map. + */ + private V getValue(Map map, String key) { + Object value = map.get(key); + if (value == null) { + return null; + } + // Decode ByteBuffer to type V + return decodeValue(value); + } + + /** + * Get a String value from the map. + */ + private String getString(Map map, String key) { + Object value = map.get(key); + if (value == null) { + return null; + } + if (value instanceof String) { + return (String) value; + } + // Decode ByteBuffer to string + return decodeStringAsString(value); + } + + /** + * Get a Double value from the map. + */ + private Double getDouble(Map map, String key) { + Object value = map.get(key); + if (value == null) { + return null; + } + if (value instanceof Number) { + return ((Number) value).doubleValue(); + } + // Decode ByteBuffer to string and parse as double + String str = decodeStringAsString(value); + try { + return Double.parseDouble(str); + } catch (NumberFormatException e) { + return null; + } + } + + /** + * Get a boolean value from the map. Returns true if the key exists (regardless of value). Also checks if the key is in the + * "flags" list. 
+ */ + private boolean getBoolean(Map map, String key) { + // Check if key exists directly in the map + if (map.containsKey(key)) { + return true; + } + // Check if key is in the "flags" list + Object flags = map.get("flags"); + if (flags instanceof List) { + List flagsList = (List) flags; + for (Object flag : flagsList) { + String flagStr = null; + if (flag instanceof String) { + flagStr = (String) flag; + } else { + flagStr = decodeStringAsString(flag); + } + if (key.equals(flagStr)) { + return true; + } + } + } + return false; + } + +} diff --git a/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommands.kt b/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommands.kt index 5d7cc2f300..13b3f89f7d 100644 --- a/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommands.kt +++ b/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommands.kt @@ -10,6 +10,7 @@ package io.lettuce.core.api.coroutines import io.lettuce.core.ExperimentalLettuceCoroutinesApi import io.lettuce.core.annotations.Experimental import io.lettuce.core.search.AggregationReply +import io.lettuce.core.search.IndexInfo import io.lettuce.core.search.SearchReply import io.lettuce.core.search.SpellCheckResult import io.lettuce.core.search.Suggestion @@ -629,6 +630,52 @@ interface RediSearchCoroutinesCommands { @Experimental suspend fun ftList(): List + /** + * Return information and statistics about a search index. + * + *

+ * This command returns detailed information and statistics about a specified search index, including configuration, + * schema definition, memory usage, indexing progress, and performance metrics. + *

+ * + *

+ * The returned [IndexInfo] contains various categories of information + *

+ *
    + *
  • General: index_name, index_options, index_definition, attributes, num_docs, max_doc_id, num_terms, num_records
  • + *
  • Size statistics: inverted_sz_mb, vector_index_sz_mb, doc_table_size_mb, sortable_values_size_mb, key_table_size_mb, etc.
  • + *
  • Indexing statistics: hash_indexing_failures, total_indexing_time, indexing, percent_indexed, number_of_uses
  • + *
  • Garbage collection: bytes_collected, total_ms_run, total_cycles, average_cycle_time_ms, last_run_time_ms
  • + *
  • Cursor statistics: global_idle, global_total, index_capacity, index_total
  • + *
  • Dialect statistics: Usage counts for each query dialect (1-4)
  • + *
  • Error statistics: Indexing failures and errors per field
  • + *
+ * + *

+ * Key use cases: + *

+ *
    + *
  • Monitoring: Track index health, memory usage, and performance
  • + *
  • Debugging: Identify indexing failures and errors
  • + *
  • Capacity planning: Analyze memory consumption and growth trends
  • + *
  • Performance tuning: Review indexing time and garbage collection metrics
  • + *
+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name + * @return an IndexInfo object containing index information and statistics + * @since 6.8 + * @see FT.INFO + * @see #ftCreate(String, CreateArgs, List) + * @see #ftList() + * @see #ftDropindex(String) + */ + @Experimental + suspend fun ftInfo(index: String): IndexInfo? + /** * Dump synonym group contents. * diff --git a/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommandsImpl.kt b/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommandsImpl.kt index 2ee27f6b85..e92799d6da 100644 --- a/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommandsImpl.kt +++ b/src/main/kotlin/io/lettuce/core/api/coroutines/RediSearchCoroutinesCommandsImpl.kt @@ -10,6 +10,7 @@ package io.lettuce.core.api.coroutines import io.lettuce.core.ExperimentalLettuceCoroutinesApi import io.lettuce.core.api.reactive.RediSearchReactiveCommands import io.lettuce.core.search.AggregationReply +import io.lettuce.core.search.IndexInfo import io.lettuce.core.search.SearchReply import io.lettuce.core.search.SpellCheckResult import io.lettuce.core.search.Suggestion @@ -147,6 +148,8 @@ open class RediSearchCoroutinesCommandsImpl(internal val ops: override suspend fun ftList(): List = ops.ftList().asFlow().toList() - + override suspend fun ftInfo(index: String): IndexInfo? 
= + ops.ftInfo(index).awaitFirstOrNull() } + diff --git a/src/main/templates/io/lettuce/core/api/RediSearchCommands.java b/src/main/templates/io/lettuce/core/api/RediSearchCommands.java index 0ec3cf91b2..9676e72ff0 100644 --- a/src/main/templates/io/lettuce/core/api/RediSearchCommands.java +++ b/src/main/templates/io/lettuce/core/api/RediSearchCommands.java @@ -8,6 +8,7 @@ import io.lettuce.core.annotations.Experimental; import io.lettuce.core.search.AggregationReply; +import io.lettuce.core.search.IndexInfo; import io.lettuce.core.search.SearchReply; import io.lettuce.core.search.SpellCheckResult; import io.lettuce.core.search.Suggestion; @@ -624,6 +625,56 @@ public interface RediSearchCommands { @Experimental List ftList(); + /** + * Return information and statistics about a search index. + * + *

+ * This command returns detailed information and statistics about a specified search index, including configuration, schema + * definition, memory usage, indexing progress, and performance metrics. + *

+ * + *

+ * The returned {@link IndexInfo} contains various categories of information + *

+ *
    + *
  • General: index_name, index_options, index_definition, attributes, num_docs, max_doc_id, num_terms, + * num_records
  • + *
  • Size statistics: inverted_sz_mb, vector_index_sz_mb, doc_table_size_mb, sortable_values_size_mb, + * key_table_size_mb, etc.
  • + *
  • Indexing statistics: hash_indexing_failures, total_indexing_time, indexing, percent_indexed, + * number_of_uses
  • + *
  • Garbage collection: bytes_collected, total_ms_run, total_cycles, average_cycle_time_ms, + * last_run_time_ms
  • + *
  • Cursor statistics: global_idle, global_total, index_capacity, index_total
  • + *
  • Dialect statistics: Usage counts for each query dialect (1-4)
  • + *
  • Error statistics: Indexing failures and errors per field
  • + *
+ * + *

+ * Key use cases: + *

+ *
    + *
  • Monitoring: Track index health, memory usage, and performance
  • + *
  • Debugging: Identify indexing failures and errors
  • + *
  • Capacity planning: Analyze memory consumption and growth trends
  • + *
  • Performance tuning: Review indexing time and garbage collection metrics
  • + *
+ * + *

+ * Time complexity: O(1) + *

+ * + * @param index the index name + * @return an IndexInfo object containing index information and statistics + * @since 6.8 + * @see FT.INFO + * @see #ftCreate(String, CreateArgs, List) + * @see #ftList() + * @see #ftDropindex(String) + */ + @Experimental + IndexInfo ftInfo(String index); + /** * Dump synonym group contents. * diff --git a/src/test/java/io/lettuce/core/search/IndexInfoTest.java b/src/test/java/io/lettuce/core/search/IndexInfoTest.java new file mode 100644 index 0000000000..f345cc813f --- /dev/null +++ b/src/test/java/io/lettuce/core/search/IndexInfoTest.java @@ -0,0 +1,144 @@ +/* + * Copyright 2024, Redis Ltd. and Contributors + * All rights reserved. + * + * Licensed under the MIT License. + */ +package io.lettuce.core.search; + +import static org.assertj.core.api.Assertions.*; + +import java.util.*; + +import org.junit.jupiter.api.Test; + +/** + * Unit tests for {@link IndexInfo}. + * + * @author Julien Ruaux + */ +class IndexInfoTest { + + @Test + void testIndexInfoBasicProperties() { + IndexInfo info = new IndexInfo<>(); + info.setIndexName("myindex"); + info.setNumDocs(100L); + info.setMaxDocId(150L); + info.setNumTerms(500L); + info.setNumRecords(1000L); + + assertThat(info.getIndexName()).isEqualTo("myindex"); + assertThat(info.getNumDocs()).isEqualTo(100L); + assertThat(info.getMaxDocId()).isEqualTo(150L); + assertThat(info.getNumTerms()).isEqualTo(500L); + assertThat(info.getNumRecords()).isEqualTo(1000L); + } + + @Test + void testIndexInfoOptions() { + IndexInfo info = new IndexInfo<>(); + info.setNoOffsets(true); + info.setNoFrequency(true); + + assertThat(info.isNoOffsets()).isTrue(); + assertThat(info.isNoFrequency()).isTrue(); + assertThat(info.isNoHighlight()).isFalse(); + assertThat(info.isNoFields()).isFalse(); + assertThat(info.isMaxTextFields()).isFalse(); + assertThat(info.isSkipInitialScan()).isFalse(); + } + + @Test + void testIndexInfoDefinition() { + IndexInfo info = new IndexInfo<>(); + IndexInfo.IndexDefinition 
definition = new IndexInfo.IndexDefinition<>(); + definition.setKeyType(IndexInfo.IndexDefinition.TargetType.HASH); + definition.addPrefix("product:"); + definition.setDefaultScore(1.0); + info.setIndexDefinition(definition); + + assertThat(info.getIndexDefinition()).isNotNull(); + assertThat(info.getIndexDefinition().getKeyType()).isEqualTo(IndexInfo.IndexDefinition.TargetType.HASH); + assertThat(info.getIndexDefinition().getPrefixes()).containsExactly("product:"); + assertThat(info.getIndexDefinition().getDefaultScore()).isEqualTo(1.0); + } + + @Test + void testIndexInfoFields() { + IndexInfo info = new IndexInfo<>(); + + IndexInfo.TextField textField = new IndexInfo.TextField("title", null, false, false, false, false, false, null, false, + null, false, new HashMap<>()); + info.addField(textField); + + IndexInfo.NumericField numericField = new IndexInfo.NumericField("price", null, false, false, false, false, false, + new HashMap<>()); + info.addField(numericField); + + assertThat(info.getFields()).hasSize(2); + assertThat(info.getFields().get(0)).isInstanceOf(IndexInfo.TextField.class); + assertThat(info.getFields().get(0).getIdentifier()).isEqualTo("title"); + assertThat(info.getFields().get(1)).isInstanceOf(IndexInfo.NumericField.class); + assertThat(info.getFields().get(1).getIdentifier()).isEqualTo("price"); + } + + @Test + void testIndexInfoDialectStatistics() { + IndexInfo info = new IndexInfo<>(); + IndexInfo.DialectStatistics dialectStats = new IndexInfo.DialectStatistics(); + dialectStats.setDialect1(10L); + dialectStats.setDialect2(20L); + dialectStats.setDialect3(30L); + dialectStats.setDialect4(40L); + info.setDialectStats(dialectStats); + + assertThat(info.getDialectStats()).isNotNull(); + assertThat(info.getDialectStats().getDialect1()).isEqualTo(10L); + assertThat(info.getDialectStats().getDialect2()).isEqualTo(20L); + assertThat(info.getDialectStats().getDialect3()).isEqualTo(30L); + assertThat(info.getDialectStats().getDialect4()).isEqualTo(40L); 
+ } + + @Test + void testIndexInfoImmutability() { + IndexInfo info = new IndexInfo<>(); + info.setNoOffsets(true); + IndexInfo.IndexDefinition definition = new IndexInfo.IndexDefinition<>(); + definition.setKeyType(IndexInfo.IndexDefinition.TargetType.HASH); + info.setIndexDefinition(definition); + + // Verify collections are unmodifiable + assertThatThrownBy(() -> info.getIndexDefinition().getPrefixes().add("new_prefix")) + .isInstanceOf(UnsupportedOperationException.class); + } + + @Test + void testIndexInfoToString() { + IndexInfo info = new IndexInfo<>(); + info.setIndexName("myindex"); + info.setNumDocs(100L); + info.setMaxDocId(150L); + info.setNumTerms(500L); + info.setNumRecords(1000L); + + String str = info.toString(); + assertThat(str).contains("myindex").contains("100").contains("150").contains("500").contains("1000"); + } + + @Test + void testIndexInfoObjectsInitialized() { + IndexInfo info = new IndexInfo<>(); + + // Verify all nested objects are initialized (not null) + assertThat(info.getIndexDefinition()).isNotNull(); + assertThat(info.getSizeStats()).isNotNull(); + assertThat(info.getIndexingStats()).isNotNull(); + assertThat(info.getGcStats()).isNotNull(); + assertThat(info.getCursorStats()).isNotNull(); + assertThat(info.getDialectStats()).isNotNull(); + assertThat(info.getIndexErrors()).isNotNull(); + assertThat(info.getFieldStatistics()).isNotNull(); + } + +} diff --git a/src/test/java/io/lettuce/core/search/RediSearchIntegrationTests.java b/src/test/java/io/lettuce/core/search/RediSearchIntegrationTests.java index 4c06d0beef..fb7369db8d 100644 --- a/src/test/java/io/lettuce/core/search/RediSearchIntegrationTests.java +++ b/src/test/java/io/lettuce/core/search/RediSearchIntegrationTests.java @@ -11,10 +11,14 @@ import io.lettuce.core.RedisClient; import io.lettuce.core.RedisCommandExecutionException; import io.lettuce.core.RedisURI; +import io.lettuce.core.api.StatefulRedisConnection; import io.lettuce.core.api.sync.RedisCommands; import 
io.lettuce.core.codec.ByteArrayCodec; +import io.lettuce.core.codec.RedisCodec; +import io.lettuce.core.codec.StringCodec; import io.lettuce.core.search.arguments.CombineArgs; import io.lettuce.core.search.arguments.CreateArgs; +import io.lettuce.core.search.arguments.DocumentLanguage; import io.lettuce.core.search.arguments.ExplainArgs; import io.lettuce.core.search.arguments.FieldArgs; import io.lettuce.core.search.arguments.HybridArgs; @@ -35,6 +39,7 @@ import io.lettuce.core.search.arguments.VectorFieldArgs; import io.lettuce.test.condition.EnabledOnCommand; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; @@ -1050,6 +1055,212 @@ void testFtListCommand() { assertThat(finalIndexes.size()).isEqualTo(initialIndexes.size()); } + /** + * Test FT.INFO command for getting index information and statistics. + */ + @Test + void testFtInfoCommand() { + String testIndex = "info-idx"; + + // Create field definitions + FieldArgs titleField = TextFieldArgs. builder().name("title").build(); + FieldArgs contentField = TextFieldArgs. builder().name("content").build(); + FieldArgs priceField = NumericFieldArgs. builder().name("price").sortable().build(); + FieldArgs categoryField = TagFieldArgs. builder().name("category").build(); + + // Create index + CreateArgs createArgs = CreateArgs. 
builder().withPrefix("product:") + .on(CreateArgs.TargetType.HASH).build(); + assertThat(redis.ftCreate(testIndex, createArgs, Arrays.asList(titleField, contentField, priceField, categoryField))) + .isEqualTo("OK"); + + // Add some test documents + redis.hset("product:1", "title", "Redis Guide"); + redis.hset("product:1", "content", "A comprehensive guide to Redis"); + redis.hset("product:1", "price", "29.99"); + redis.hset("product:1", "category", "books"); + + redis.hset("product:2", "title", "Lettuce Tutorial"); + redis.hset("product:2", "content", "Learn Lettuce Redis client"); + redis.hset("product:2", "price", "19.99"); + redis.hset("product:2", "category", "tutorials"); + + // Get index information + IndexInfo info = redis.ftInfo(testIndex); + + // Verify basic information is present + assertThat(info).isNotNull(); + assertThat(info.getIndexName()).isEqualTo(testIndex); + + // Verify index definition + assertThat(info.getIndexDefinition()).isNotNull(); + assertThat(info.getIndexDefinition().getKeyType()).isNotNull(); + + // Verify fields (schema fields) + assertThat(info.getFields()).isNotEmpty(); + assertThat(info.getFields()).hasSize(4); // title, content, price, category + + // Verify field types + List fields = info.getFields(); + assertThat(fields.stream().filter(f -> f instanceof IndexInfo.TextField).count()).isEqualTo(2); // title, content + assertThat(fields.stream().filter(f -> f instanceof IndexInfo.NumericField).count()).isEqualTo(1); // price + assertThat(fields.stream().filter(f -> f instanceof IndexInfo.TagField).count()).isEqualTo(1); // category + + // Verify specific field properties + IndexInfo.Field priceFieldInfo = fields.stream().filter(f -> "price".equals(f.getIdentifier())).findFirst() + .orElse(null); + assertThat(priceFieldInfo).isNotNull(); + assertThat(priceFieldInfo).isInstanceOf(IndexInfo.NumericField.class); + assertThat(priceFieldInfo.isSortable()).isTrue(); + + // Verify document count + 
assertThat(info.getNumDocs()).isEqualTo(2L); + + // Verify statistics are present + assertThat(info.getNumTerms()).isNotNull(); + assertThat(info.getNumRecords()).isNotNull(); + + // Verify size statistics are available + if (info.getSizeStats() != null) { + assertThat(info.getSizeStats().getInvertedSizeMb()).isNotNull(); + } + + // Verify indexing statistics are available + if (info.getIndexingStats() != null) { + // These fields should be present + assertThat(info.getIndexingStats().isIndexing()).isNotNull(); + assertThat(info.getIndexingStats().getPercentIndexed()).isNotNull(); + } + + // Cleanup + assertThat(redis.ftDropindex(testIndex)).isEqualTo("OK"); + redis.del("product:1", "product:2"); + } + + /** + * Test FT.INFO command with ByteArrayCodec to verify parsing works correctly with binary codec. + */ + @Test + void testFtInfoCommandWithByteArrayCodec() { + String testIndex = "binary-info-idx"; + + // Create field definitions using String codec + FieldArgs titleField = TextFieldArgs. builder().name("title").build(); + FieldArgs priceField = NumericFieldArgs. builder().name("price").sortable().build(); + FieldArgs categoryField = TagFieldArgs. builder().name("category").separator(",").build(); + + // Create index using String codec + CreateArgs createArgs = CreateArgs. 
builder().withPrefix("product:") + .on(CreateArgs.TargetType.HASH).build(); + assertThat(redis.ftCreate(testIndex, createArgs, Arrays.asList(titleField, priceField, categoryField))).isEqualTo("OK"); + + // Add test documents + redis.hset("product:1", "title", "Test Product"); + redis.hset("product:1", "price", "99.99"); + redis.hset("product:1", "category", "electronics,gadgets"); + + // Get index information using ByteArrayCodec connection + // Note: ftInfo always takes a String index name, but the connection uses ByteArrayCodec + // internally for parsing the response + IndexInfo info = redisBinary.ftInfo(testIndex); + + // Verify basic information is present + assertThat(info).isNotNull(); + assertThat(info.getIndexName()).isEqualTo(testIndex); + + // Verify index definition + assertThat(info.getIndexDefinition()).isNotNull(); + assertThat(info.getIndexDefinition().getKeyType()).isNotNull(); + + // Verify fields + assertThat(info.getFields()).isNotEmpty(); + assertThat(info.getFields()).hasSize(3); + + // Verify field types + List fields = info.getFields(); + assertThat(fields.stream().filter(f -> f instanceof IndexInfo.TextField).count()).isEqualTo(1); + assertThat(fields.stream().filter(f -> f instanceof IndexInfo.NumericField).count()).isEqualTo(1); + assertThat(fields.stream().filter(f -> f instanceof IndexInfo.TagField).count()).isEqualTo(1); + + // Verify category field properties + IndexInfo.Field categoryFieldInfo = fields.stream() + .filter(f -> "category".equals(new String((byte[]) f.getIdentifier()))).findFirst().orElse(null); + assertThat(categoryFieldInfo).isNotNull(); + assertThat(categoryFieldInfo).isInstanceOf(IndexInfo.TagField.class); + assertThat(((IndexInfo.TagField) categoryFieldInfo).getSeparator()).isNotNull(); + assertThat(((IndexInfo.TagField) categoryFieldInfo).getSeparator()).isEqualTo(","); + + // Verify document count + assertThat(info.getNumDocs()).isEqualTo(1L); + + // Cleanup + 
assertThat(redis.ftDropindex(testIndex)).isEqualTo("OK"); + } + + @Test + void testFtInfoCommandWithCompositeCodec() { + String testIndex = "composite-codec-idx"; + + // Create a connection with composite codec (String keys, byte[] values) + RedisCodec codec = RedisCodec.of(StringCodec.UTF8, ByteArrayCodec.INSTANCE); + StatefulRedisConnection compositeConnection = client.connect(codec); + RedisCommands compositeRedis = compositeConnection.sync(); + + try { + // Create field definitions using String codec + FieldArgs titleField = TextFieldArgs. builder().name("title").build(); + FieldArgs priceField = NumericFieldArgs. builder().name("price").sortable().build(); + FieldArgs categoryField = TagFieldArgs. builder().name("category").separator(",").build(); + + // Create index using String codec + CreateArgs createArgs = CreateArgs. builder().withPrefix("product:") + .on(CreateArgs.TargetType.HASH).build(); + assertThat(redis.ftCreate(testIndex, createArgs, Arrays.asList(titleField, priceField, categoryField))) + .isEqualTo("OK"); + + // Add test documents using regular String connection + redis.hset("product:1", "title", "Test Product"); + redis.hset("product:1", "price", "99.99"); + redis.hset("product:1", "category", "electronics,gadgets"); + + // Get index information using composite codec connection + IndexInfo info = compositeRedis.ftInfo(testIndex); + + // Verify basic information is present + assertThat(info).isNotNull(); + // Index name should be a String (key type) + assertThat(info.getIndexName()).isEqualTo(testIndex); + + // Verify index definition + assertThat(info.getIndexDefinition()).isNotNull(); + assertThat(info.getIndexDefinition().getKeyType()).isNotNull(); + + // Verify fields + assertThat(info.getFields()).isNotEmpty(); + assertThat(info.getFields()).hasSize(3); + + // Verify field types - identifiers and attributes should be byte[] (value type) + List> fields = info.getFields(); + assertThat(fields.stream().filter(f -> f instanceof 
IndexInfo.TextField).count()).isEqualTo(1); + assertThat(fields.stream().filter(f -> f instanceof IndexInfo.NumericField).count()).isEqualTo(1); + assertThat(fields.stream().filter(f -> f instanceof IndexInfo.TagField).count()).isEqualTo(1); + + // Verify category field properties - identifier should be byte[] + IndexInfo.Field categoryFieldInfo = fields.stream() + .filter(f -> "category".equals(new String(f.getIdentifier()))).findFirst().orElse(null); + assertThat(categoryFieldInfo).isNotNull(); + assertThat(categoryFieldInfo).isInstanceOf(IndexInfo.TagField.class); + assertThat(((IndexInfo.TagField) categoryFieldInfo).getSeparator()).isNotNull(); + assertThat(((IndexInfo.TagField) categoryFieldInfo).getSeparator()).isEqualTo(","); + + // Verify document count + assertThat(info.getNumDocs()).isEqualTo(1L); + } finally { + compositeConnection.close(); + assertThat(redis.ftDropindex(testIndex)).isEqualTo("OK"); + } + } + /** * Test field aliases in RETURN clause to rename fields in search results. */ @@ -1327,4 +1538,158 @@ private byte[] floatArrayToByteArray(float[] vector) { return buffer.array(); } + /** + * Comprehensive test that verifies all CreateArgs options are properly reflected in IndexInfo. + */ + @Test + void testIndexInfoWithAllCreateArgsOptions() { + String testIndex = "comprehensive-idx"; + + // Create index with all available CreateArgs options + CreateArgs createArgs = CreateArgs. builder().on(CreateArgs.TargetType.HASH) + .withPrefix("test:").filter("@category=='electronics'").defaultLanguage(DocumentLanguage.ENGLISH) + .languageField("lang").defaultScore(0.5).scoreField("score").payloadField("payload").noOffsets() + .noHighlighting().noFields().noFrequency().maxTextFields().skipInitialScan().build(); + + FieldArgs titleField = TextFieldArgs. builder().name("title").sortable().noStem().build(); + + FieldArgs priceField = NumericFieldArgs. builder().name("price").sortable().build(); + + FieldArgs categoryField = TagFieldArgs. 
builder().name("category").separator(",").build(); + + assertThat(redis.ftCreate(testIndex, createArgs, Arrays.asList(titleField, priceField, categoryField))).isEqualTo("OK"); + + // Get index information + IndexInfo info = redis.ftInfo(testIndex); + + // Verify basic information + assertThat(info).isNotNull(); + assertThat(info.getIndexName()).isEqualTo(testIndex); + + // Verify index definition options + IndexInfo.IndexDefinition definition = info.getIndexDefinition(); + assertThat(definition).isNotNull(); + assertThat(definition.getKeyType()).isNotNull(); + assertThat(definition.getKeyType()).isEqualTo(IndexInfo.IndexDefinition.TargetType.HASH); + + // Verify prefixes + assertThat(definition.getPrefixes()).containsExactly("test:"); + + // Verify filter + assertThat(definition.getFilter()).isNotNull(); + assertThat(definition.getFilter()).isEqualTo("@category=='electronics'"); + + // Verify language settings + // Note: Redis FT.INFO does not return default_language, only language_field + assertThat(definition.getLanguageField()).isNotNull(); + assertThat(definition.getLanguageField()).isEqualTo("lang"); + + // Verify score settings + assertThat(definition.getDefaultScore()).isEqualTo(0.5); + assertThat(definition.getScoreField()).isNotNull(); + assertThat(definition.getScoreField()).isEqualTo("score"); + + // Verify payload field + assertThat(definition.getPayloadField()).isNotNull(); + assertThat(definition.getPayloadField()).isEqualTo("payload"); + + // Verify index options (the main focus of this test) + assertThat(info.isNoOffsets()).isTrue(); + assertThat(info.isNoHighlight()).isTrue(); + assertThat(info.isNoFields()).isTrue(); + assertThat(info.isNoFrequency()).isTrue(); + assertThat(info.isMaxTextFields()).isTrue(); + // Note: Redis FT.INFO does not return SKIPINITIALSCAN option + // assertThat(info.isSkipInitialScan()).isTrue(); + + // Verify fields + assertThat(info.getFields()).hasSize(3); + + // Verify title field properties + IndexInfo.Field 
titleFieldInfo = info.getFields().stream().filter(f -> "title".equals(f.getIdentifier())).findFirst() + .orElse(null); + assertThat(titleFieldInfo).isNotNull(); + assertThat(titleFieldInfo).isInstanceOf(IndexInfo.TextField.class); + assertThat(titleFieldInfo.isSortable()).isTrue(); + assertThat(((IndexInfo.TextField) titleFieldInfo).isNoStem()).isTrue(); + + // Verify price field properties + IndexInfo.Field priceFieldInfo = info.getFields().stream().filter(f -> "price".equals(f.getIdentifier())).findFirst() + .orElse(null); + assertThat(priceFieldInfo).isNotNull(); + assertThat(priceFieldInfo).isInstanceOf(IndexInfo.NumericField.class); + assertThat(priceFieldInfo.isSortable()).isTrue(); + + // Verify category field properties + IndexInfo.Field categoryFieldInfo = info.getFields().stream().filter(f -> "category".equals(f.getIdentifier())) + .findFirst().orElse(null); + assertThat(categoryFieldInfo).isNotNull(); + assertThat(categoryFieldInfo).isInstanceOf(IndexInfo.TagField.class); + assertThat(((IndexInfo.TagField) categoryFieldInfo).getSeparator()).isNotNull(); + assertThat(((IndexInfo.TagField) categoryFieldInfo).getSeparator()).isEqualTo(","); + + // Cleanup + assertThat(redis.ftDropindex(testIndex)).isEqualTo("OK"); + } + + /** + * Test that verifies strongly-typed statistics objects are properly populated. + */ + @Test + void testIndexInfoStatistics() { + String testIndex = "stats-idx"; + + // Create a simple index + CreateArgs createArgs = CreateArgs. builder().on(CreateArgs.TargetType.HASH) + .withPrefix("stats:").build(); + + FieldArgs titleField = TextFieldArgs. 
builder().name("title").build(); + + assertThat(redis.ftCreate(testIndex, createArgs, Collections.singletonList(titleField))).isEqualTo("OK"); + + // Add some documents to generate statistics + redis.hset("stats:1", "title", "First document"); + redis.hset("stats:2", "title", "Second document"); + redis.hset("stats:3", "title", "Third document"); + + // Wait a bit for indexing to complete + try { + Thread.sleep(100); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + + // Get index information + IndexInfo info = redis.ftInfo(testIndex); + + // Verify size statistics are available + IndexInfo.SizeStatistics sizeStats = info.getSizeStats(); + if (sizeStats != null) { + // At least some size statistics should be present + assertThat(sizeStats.getInvertedSizeMb()).isNotNull(); + assertThat(sizeStats.getDocTableSizeMb()).isNotNull(); + } + + // Verify indexing statistics are available + IndexInfo.IndexingStatistics indexingStats = info.getIndexingStats(); + if (indexingStats != null) { + // Indexing should be complete + // If indexing field is present, it should be false (complete) + assertThat(indexingStats.isIndexing()).isFalse(); + // Percent indexed should be 1.0 (100%) + assertThat(indexingStats.getPercentIndexed()).isEqualTo(1.0); + } + + // Verify GC statistics structure (may or may not have data) + IndexInfo.GcStatistics gcStats = info.getGcStats(); + // GC stats may be null if no GC has run yet, which is fine + + // Verify cursor statistics structure (may or may not have data) + IndexInfo.CursorStatistics cursorStats = info.getCursorStats(); + // Cursor stats may be null if no cursors have been used, which is fine + + // Cleanup + assertThat(redis.ftDropindex(testIndex)).isEqualTo("OK"); + } + }