1. Enter the virtual environment where the full-text search toolkit is installed:
/home/python/.virtualenvs/py3_django/lib/python3.5/site-packages/
then change into haystack/backends/.
2. Create a ChineseAnalyzer.py file with the following content:
import jieba
from whoosh.analysis import Tokenizer, Token


class ChineseTokenizer(Tokenizer):
    def __call__(self, value, positions=False, chars=False,
                 keeporiginal=False, removestops=True,
                 start_pos=0, start_char=0, mode='', **kwargs):
        t = Token(positions, chars, removestops=removestops, mode=mode, **kwargs)
        # Segment the text with jieba; cut_all=True is "full mode", which
        # emits every word jieba can recognize in the input.
        seglist = jieba.cut(value, cut_all=True)
        for w in seglist:
            t.original = t.text = w
            t.boost = 1.0
            if positions:
                t.pos = start_pos + value.find(w)
            if chars:
                t.startchar = start_char + value.find(w)
                t.endchar = start_char + value.find(w) + len(w)
            yield t


def ChineseAnalyzer():
    return ChineseTokenizer()
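To sanity-check the tokenizer before wiring it in, something like the following can be run from the haystack/backends/ directory (a minimal sketch; the sample sentence is arbitrary):

# Quick manual test of the analyzer; run from haystack/backends/.
from ChineseAnalyzer import ChineseAnalyzer

analyzer = ChineseAnalyzer()
# Print each token jieba produces, with its character offsets.
for token in analyzer('我爱北京天安门', positions=True, chars=True):
    print(token.text, token.startchar, token.endchar)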
3. Copy whoosh_backend.py to whoosh_cn_backend.py:
cp whoosh_backend.py whoosh_cn_backend.py
4. Change the analyzer class to ChineseAnalyzer.
Open whoosh_cn_backend.py for editing and add the import:
from .ChineseAnalyzer import ChineseAnalyzer
Find
analyzer=StemmingAnalyzer()
and change it to
analyzer=ChineseAnalyzer()
5. Change the search engine in the project's Django settings (see the sketch below).
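The original note does not show the settings change itself; assuming a standard Haystack setup, it goes in settings.py. The 'whoosh_index' path is a common choice and may differ in your project:

# settings.py -- assuming a standard Haystack configuration.
import os

HAYSTACK_CONNECTIONS = {
    'default': {
        # Use the copied backend so the jieba-based ChineseAnalyzer is applied.
        'ENGINE': 'haystack.backends.whoosh_cn_backend.WhooshEngine',
        # Where Whoosh stores its index files; adjust to your project layout.
        'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
    },
}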
6. Rebuild the index data:
python manage.py rebuild_index
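Once the index is rebuilt, a quick query from python manage.py shell confirms that Chinese search works end to end (a minimal sketch; the query term is an arbitrary example):

# Run inside `python manage.py shell` so Django settings are loaded.
from haystack.query import SearchQuerySet

# Any Chinese word that appears in your indexed content will do here.
results = SearchQuerySet().filter(content='手机')
print(results.count())
for result in results[:5]:
    print(result.object)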
///////////////
Generating an RSA private/public key pair with openssl
Launch the interactive openssl prompt, then run:
openssl
genrsa -out app_private_key.pem 2048                          # private key
rsa -in app_private_key.pem -pubout -out app_public_key.pem   # export the public key
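To confirm the exported public key really matches the private key, a quick check with the cryptography package works (an illustrative sketch; the package is not part of the original notes):

# Sanity-check the key pair; assumes `pip install cryptography`.
from cryptography.hazmat.primitives.serialization import (
    load_pem_private_key,
    load_pem_public_key,
)

with open('app_private_key.pem', 'rb') as f:
    private_key = load_pem_private_key(f.read(), password=None)

with open('app_public_key.pem', 'rb') as f:
    public_key = load_pem_public_key(f.read())

# The public numbers derived from the private key must equal those
# in the exported public key.
assert private_key.public_key().public_numbers() == public_key.public_numbers()
print('key pair matches')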