CREATE TABLE `t_test_filter` (
`Fcommodity_id` bigint(20) unsigned NOT NULL DEFAULT '0',
`FUserId` int(10) unsigned NOT NULL DEFAULT '0',
`FUserName` varchar(32) NOT NULL DEFAULT '',
`Fshopid` bigint(20) unsigned NOT NULL DEFAULT '0',
`Ftitle` varchar(60) NOT NULL DEFAULT '',
`FProperty` int(10) unsigned NOT NULL DEFAULT '0',
`FTime` int(10) unsigned NOT NULL DEFAULT '0',
PRIMARY KEY (`Fcommodity_id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;

CREATE TABLE `t_test_filter_sphinxSE` (
`id` bigint(20) unsigned NOT NULL,
`weight` int(11) DEFAULT '1',
`query` varchar(3072) NOT NULL,
`total_found` int(11) DEFAULT '0',
`total_match` int(11) DEFAULT '0',
`query_time` int(11) DEFAULT '0',
`FUserId` int(10) unsigned DEFAULT '0',
`Fshopid` bigint(20) unsigned NOT NULL DEFAULT '0',
`FProperty` int(10) unsigned NOT NULL DEFAULT '0',
`FTime` int(10) unsigned NOT NULL DEFAULT '0',
KEY `query` (`query`(1536))
) ENGINE=SPHINX DEFAULT CHARSET=gbk CONNECTION='sphinx://localhost:9412';
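For reference, SphinxSE requires the first three columns to be the document id, the match weight, and the query string; the query column must carry an index, and any further columns are bound by name to attributes returned by searchd. A minimal variant without the attribute columns might look like this (the table name here is hypothetical):

-- minimal SphinxSE table sketch: id/weight/query are mandatory, query must be indexed
CREATE TABLE `t_test_minimal_sphinxSE` (
`id` bigint(20) unsigned NOT NULL,
`weight` int(11) NOT NULL,
`query` varchar(3072) NOT NULL,
KEY `query` (`query`(1536))
) ENGINE=SPHINX CONNECTION='sphinx://localhost:9412/t_test_filter';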
insert into t_test_filter values(4567,123,"test qq nick", 1234567,"title name",2324354,9876);
insert into t_test_filter values(2,34,"nick name", 4545,"title name",67,40);
insert into t_test_filter values(66,55,"test3 qq3 nick3", 22,"title3 name3 ",88,77);
insert into t_test_filter values(767,234,"test3 qq nick", 3433,"title3 name3 ",342342,89340);
insert into t_test_filter values(4545,123,"test qq nick", 1234567,"title name",2324354,9876);
insert into t_test_filter values(4546,778,"test qq nick", 1234567,"title name",2324354,9876);
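Assuming the t_test_filter index has been built from these rows (with Coreseek's indexer tool) and searchd is listening on port 9412, here is a quick sanity check. If FUserName and Ftitle are the indexed full-text fields, this should return the five rows whose FUserName contains the word nick:

-- plain keyword match, no attribute filtering yet
select * from t_test_filter_sphinxSE where query="nick";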
Test the queries through SphinxSE:
(1) The query term is nick, with the condition FUserId=34 || Fshopid=1234567:
select * from t_test_filter_sphinxSE where query="nick;select=IF(FUserId=34,1,0) + IF(Fshopid=1234567,10,0) as match_qq; filter=match_qq,1,10,11";
(2) The query term is nick, with the condition FUserId=34 || (Fshopid=1234567 && FUserId=778):
select * from t_test_filter_sphinxSE where query="nick;select=IF(FUserId=34,1,0) + IF(Fshopid=1234567,10,0) + IF(FUserId=778,100,0) as match_qq; filter=match_qq,1,110,111";
Both statements work the same way: select computes a score from the attribute conditions, and filter then keeps only the wanted value combinations.
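For comparison, here is query (1) restated as plain MySQL against the source table; this is only a sketch, with LIKE standing in for word-level full-text matching:

-- note: unlike full-text matching, LIKE '%nick%' would also match e.g. "nick3"
select * from t_test_filter
where FUserName like '%nick%' and (FUserId=34 or Fshopid=1234567);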
2. The syntax above mainly concerns what goes inside query.
(1) The first token, nick, is the keyword that must match.
(2) The select clause, which is equivalent to SetSelect() in the API. It works on the attributes declared via sql_attr_xxxx in the config file to filter the returned data. The computed result is stored under a chosen name (match_qq here). The expressions support operators such as +, -, *, /, <, >, <=, >=, =, and <>.
(3) The filter clause, which is equivalent to SetFilter() in the API. It filters by the result of select. Different query conditions yield different value combinations, so very complex filtering conditions can be supported this way.
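Alongside filter, SphinxSE also accepts !filter, which excludes the listed values instead of keeping them. Under the same assumptions as above, this keeps only the nick matches where neither condition holds (match_qq = 0):

-- !filter drops match_qq values 1, 10 and 11, leaving only documents where
-- neither FUserId=34 nor Fshopid=1234567 is true
select * from t_test_filter_sphinxSE where query="nick;select=IF(FUserId=34,1,0) + IF(Fshopid=1234567,10,0) as match_qq;!filter=match_qq,1,10,11";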
Note: the C API and PHP interface should already support this in Coreseek 3.2.14, which provides the SetSelect() and SetFilter() calls, but I have not tried them myself.
source t_test_filter
{
type = mysql
# sql_host and sql_query are missing from the original snippet; the values below
# are assumptions reconstructed from the t_test_filter schema above
sql_host = localhost
sql_user = xxxx
sql_pass = xxx
sql_db = test
sql_port = 3306
sql_query = SELECT Fcommodity_id, FUserId, FUserName, Fshopid, Ftitle, FProperty, FTime FROM t_test_filter
# FUserId must be declared as an attribute for the select/filter examples to work
sql_attr_uint = FUserId
sql_attr_bigint = Fshopid
sql_attr_uint = FProperty
sql_attr_uint = FTime
}
index t_test_filter
{
source = t_test_filter
path = /usr/local/coreseek/var/data/t_test_filter
docinfo = extern
mlock = 0
morphology = none
exceptions = /usr/local/coreseek/var/exceptions.txt
min_word_len = 1
charset_type = zh_cn.gbk
charset_dictpath = /usr/local/coreseek/dict
ngram_len = 1
ngram_chars = U+3000..U+2FA1F
html_strip = 0
blend_mode = trim_none
}
#############################################################################
## indexer settings
#############################################################################
indexer
{
# memory limit, in bytes, kilobytes (16384K) or megabytes (256M)
# optional, default is 32M, max is 2047M, recommended is 256M to 1024M
mem_limit = 40M
# maximum IO calls per second (for I/O throttling)
# optional, default is 0 (unlimited)
#
# max_iops = 40
# maximum IO call size, bytes (for I/O throttling)
# optional, default is 0 (unlimited)
#
# max_iosize = 1048576
# maximum xmlpipe2 field length, bytes
# optional, default is 2M
#
# max_xmlpipe2_field = 4M
# write buffer size, bytes
# several (currently up to 4) buffers will be allocated
# write buffers are allocated in addition to mem_limit
# optional, default is 1M
#
write_buffer = 1M
}
#############################################################################
## searchd settings
#############################################################################
searchd
{
# hostname, port, or hostname:port, or /unix/socket/path to listen on
# multi-value, multiple listen points are allowed
# optional, default is 0.0.0.0:9312 (listen on all interfaces, port 9312)
#
# listen = 127.0.0.1
# listen = 192.168.0.1:9312
listen = 9412
# listen = /var/run/searchd.sock
listen = localhost:9406:mysql41
binlog_path = /usr/local/coreseek/var/data
# optional, default is 'searchd.log'
log = /usr/local/coreseek/var/log/searchd_9412.log
# optional, default is empty (do not log queries)
query_log = /usr/local/coreseek/var/log/query_9412.log
# optional, default is 5
read_timeout = 5
# optional, default is 5 minutes
client_timeout = 300
# optional, default is 0 (unlimited)
max_children = 30
# mandatory
pid_file = /usr/local/coreseek/var/log/searchd_9412.pid
# WARNING, THERE'S ALSO PER-QUERY LIMIT, SEE SetLimits() API CALL
# default is 1000 (just like Google)
max_matches = 1000000
# optional, default is 1
seamless_rotate = 1
# optional, default is 0 (do not preopen)
preopen_indexes = 0
# optional, default is 1 (do unlink)
unlink_old = 1
# updates will be automatically dumped to disk this frequently
# optional, default is 0 (disable periodic flush)
#
# attr_flush_period = 900
# instance-wide ondisk_dict defaults (per-index value take precedence)
# optional, default is 0 (precache all dictionaries in RAM)
#
# ondisk_dict_default = 1
# MVA updates pool size
# shared between all instances of searchd, disables attr flushes!
# optional, default size is 1M
mva_updates_pool = 1M
# limits both query packets from clients, and responses from agents
# optional, default size is 8M
max_packet_size = 8M
# searchd will (try to) log crashed query to 'crash_log_path.PID' file
# optional, default is empty (do not create crash logs)
#
# crash_log_path = /usr/local/coreseek/var/log/crash
# max allowed per-query filter count
# optional, default is 256
max_filters = 256
# optional, default is 4096
max_filter_values = 4096
# socket listen queue length
# optional, default is 5
#
# listen_backlog = 5
# per-keyword read buffer size
# optional, default is 256K
#
# read_buffer = 256K
# unhinted read size (currently used when reading hits)
# optional, default is 32K
#
# read_unhinted = 32K
}
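Since the config also opens a mysql41 listener on port 9406, the index can be queried over SphinxQL with a stock mysql client (mysql -h 127.0.0.1 -P 9406), bypassing SphinxSE entirely. The sketch below is the SphinxQL counterpart of query (1); note that this is an assumption on my part: SphinxQL in Coreseek 3.2.14 (based on Sphinx 0.9.9) is more limited than in later Sphinx releases, and filtering on an expression alias may not work there.

-- counterpart of query (1); syntax per later Sphinx versions
SELECT *, IF(FUserId=34,1,0) + IF(Fshopid=1234567,10,0) AS match_qq
FROM t_test_filter
WHERE MATCH('nick') AND match_qq IN (1,10,11);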