/* This file is (c) 2008-2012 Konstantin Isakov <ikm@goldendict.org>
 * Part of GoldenDict. Licensed under GPLv3 or later, see the LICENSE file */

#include "stardict.hh"
#include "btreeidx.hh"
#include "folding.hh"
#include "utf8.hh"
#include "chunkedstorage.hh"
#include "dictzip.h"
#include "xdxf2html.hh"
#include "htmlescape.hh"
#include "langcoder.hh"
#include "dprintf.hh"
#include "fsencoding.hh"
#include "filetype.hh"

#include <zlib.h>
#include <map>
#include <set>
#include <string>

#ifndef __WIN32
#include <arpa/inet.h>
#else
#include <winsock.h>
#endif

#include <stdlib.h>

#ifdef _MSC_VER
#include <stub_msvc.h>
#endif

#include <QString>
#include <QSemaphore>
#include <QThreadPool>
#include <QAtomicInt>

#include "ufile.hh"

namespace Stardict {

using std::map;
using std::multimap;
using std::pair;
using std::set;
using std::string;
using gd::wstring;

using BtreeIndexing::WordArticleLink;
using BtreeIndexing::IndexedWords;
using BtreeIndexing::IndexInfo;

namespace {

DEF_EX( exNotAnIfoFile, "Not an .ifo file", Dictionary::Ex )
DEF_EX_STR( exBadFieldInIfo, "Bad field in .ifo file encountered:", Dictionary::Ex )
DEF_EX_STR( exNoIdxFile, "No corresponding .idx file was found for", Dictionary::Ex )
DEF_EX_STR( exNoDictFile, "No corresponding .dict file was found for", Dictionary::Ex )
DEF_EX_STR( exNoSynFile, "No corresponding .syn file was found for", Dictionary::Ex )

DEF_EX( ex64BitsNotSupported, "64-bit indices are not presently supported, sorry", Dictionary::Ex )
DEF_EX( exDicttypeNotSupported, "Dictionaries with dicttypes are not supported, sorry", Dictionary::Ex )

DEF_EX_STR( exCantReadFile, "Can't read file", Dictionary::Ex )
DEF_EX_STR( exWordIsTooLarge, "Encountered a word that is too large:", Dictionary::Ex )
DEF_EX_STR( exSuddenEndOfFile, "Sudden end of file", Dictionary::Ex )

DEF_EX_STR( exIncorrectOffset, "Incorrect offset encountered in file", Dictionary::Ex )

/// Contents of an .ifo file
struct Ifo
{
  string version;
  string bookname;
  uint32_t wordcount, synwordcount, idxfilesize, idxoffsetbits;
  string sametypesequence, dicttype, description;
  string copyright, author, email;

  Ifo( File::Class & );
};

enum
{
  Signature = 0x58444953, // SIDX on little-endian, XDIS on big-endian
  CurrentFormatVersion = 8 + BtreeIndexing::FormatVersion + Folding::Version
};

struct IdxHeader
{
  uint32_t signature; // First comes the signature, SIDX
  uint32_t formatVersion; // File format version (CurrentFormatVersion)
  uint32_t chunksOffset; // The offset to chunks' storage
  uint32_t indexBtreeMaxElements; // Two fields from IndexInfo
  uint32_t indexRootOffset;
  uint32_t wordCount; // Saved from Ifo::wordcount
  uint32_t synWordCount; // Saved from Ifo::synwordcount
  uint32_t bookNameSize; // Book name's length. Used to read it then.
  uint32_t sameTypeSequenceSize; // That string's size. Used to read it then.
  uint32_t langFrom; // Source language
  uint32_t langTo;   // Target language
}
#ifndef _MSC_VER
__attribute__((packed))
#endif
;
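
// On-disk layout of the GoldenDict index built by makeDictionaries() below
// (a sketch inferred from the code in this file, not from any external spec):
// the IdxHeader above comes first, followed immediately by the raw book name
// and the sametypesequence string, then the chunked article records, and
// finally the B-tree index whose root offset is written back into the header.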

bool indexIsOldOrBad( string const & indexFile )
{
  File::Class idx( indexFile, "rb" );

  IdxHeader header;

  return idx.readRecords( &header, sizeof( header ), 1 ) != 1 ||
         header.signature != Signature ||
         header.formatVersion != CurrentFormatVersion;
}

class StardictDictionary: public BtreeIndexing::BtreeDictionary
{
  Mutex idxMutex;
  File::Class idx;
  IdxHeader idxHeader;
  string bookName;
  string sameTypeSequence;
  ChunkedStorage::Reader chunks;
  Mutex dzMutex;
  dictData * dz;

public:

  StardictDictionary( string const & id, string const & indexFile,
                      vector< string > const & dictionaryFiles );

  ~StardictDictionary();

  virtual string getName() throw()
  { return bookName; }

  virtual map< Dictionary::Property, string > getProperties() throw()
  { return map< Dictionary::Property, string >(); }

  virtual unsigned long getArticleCount() throw()
  { return idxHeader.wordCount; }

  virtual unsigned long getWordCount() throw()
  { return idxHeader.wordCount + idxHeader.synWordCount; }

  inline virtual quint32 getLangFrom() const
  { return idxHeader.langFrom; }

  inline virtual quint32 getLangTo() const
  { return idxHeader.langTo; }

  virtual sptr< Dictionary::WordSearchRequest > findHeadwordsForSynonym( wstring const & )
    throw( std::exception );

  virtual sptr< Dictionary::DataRequest > getArticle( wstring const &,
                                                      vector< wstring > const & alts,
                                                      wstring const & )
    throw( std::exception );

  virtual sptr< Dictionary::DataRequest > getResource( string const & name )
    throw( std::exception );

  virtual QString const & getDescription();

protected:

  void loadIcon() throw();

private:

  /// Retrieves the article's offset/size in the .dict file, and its headword.
  void getArticleProps( uint32_t articleAddress,
                        string & headword,
                        uint32_t & offset, uint32_t & size );

  /// Loads the article, storing its headword and formatting the data it has
  /// into html.
  void loadArticle( uint32_t address,
                    string & headword,
                    string & articleText );

  string loadString( size_t size );

  string handleResource( char type, char const * resource, size_t size );

  friend class StardictArticleRequest;
  friend class StardictHeadwordsRequest;
};
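
// Note (an observation from makeDictionaries() below, not from any external
// spec): dictionaryFiles is expected to hold the .ifo file at index 0, the
// .idx at index 1 and the .dict(.dz) at index 2, with an optional .syn file
// at index 3 -- hence the use of dictionaryFiles[ 2 ] in the constructor.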

StardictDictionary::StardictDictionary( string const & id,
                                        string const & indexFile,
                                        vector< string > const & dictionaryFiles ):
  BtreeDictionary( id, dictionaryFiles ),
  idx( indexFile, "rb" ),
  idxHeader( idx.read< IdxHeader >() ),
  bookName( loadString( idxHeader.bookNameSize ) ),
  sameTypeSequence( loadString( idxHeader.sameTypeSequenceSize ) ),
  chunks( idx, idxHeader.chunksOffset )
{
  // Open the .dict file
  dz = dict_data_open( dictionaryFiles[ 2 ].c_str(), 0 );

  if ( !dz )
    throw exCantReadFile( dictionaryFiles[ 2 ] );

  // Initialize the index
  openIndex( IndexInfo( idxHeader.indexBtreeMaxElements,
                        idxHeader.indexRootOffset ),
             idx, idxMutex );
}

StardictDictionary::~StardictDictionary()
{
  if ( dz )
    dict_data_close( dz );
}

void StardictDictionary::loadIcon() throw()
{
  if ( dictionaryIconLoaded )
    return;

  QString fileName =
    QDir::fromNativeSeparators( FsEncoding::decode( getDictionaryFilenames()[ 0 ].c_str() ) );

  // Remove the extension
  fileName.chop( 3 );

  if ( !loadIconFromFile( fileName ) )
  {
    // Load failed -- use default icons
    dictionaryNativeIcon = dictionaryIcon = QIcon( ":/icons/icon32_stardict.png" );
  }

  dictionaryIconLoaded = true;
}

string StardictDictionary::loadString( size_t size )
{
  vector< char > data( size );

  idx.read( &data.front(), data.size() );

  return string( &data.front(), data.size() );
}
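
// Each article record in the chunked storage (written by handleIdxSynFile()
// below) consists of the article's offset in the .dict file and its size,
// both as host-order uint32_t, followed by the zero-terminated headword --
// which is exactly what getArticleProps() unpacks here.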

void StardictDictionary::getArticleProps( uint32_t articleAddress,
                                          string & headword,
                                          uint32_t & offset, uint32_t & size )
{
  vector< char > chunk;

  Mutex::Lock _( idxMutex );

  char * articleData = chunks.getBlock( articleAddress, chunk );

  memcpy( &offset, articleData, sizeof( uint32_t ) );
  articleData += sizeof( uint32_t );
  memcpy( &size, articleData, sizeof( uint32_t ) );
  articleData += sizeof( uint32_t );

  headword = articleData;
}

/// This function tries to make html out of a StarDict resource of type
/// 'type', contained in a block pointed to by 'resource', 'size' bytes long.
string StardictDictionary::handleResource( char type, char const * resource, size_t size )
{
  switch( type )
  {
    case 'x': // Xdxf content
      return Xdxf2Html::convert( string( resource, size ), Xdxf2Html::STARDICT, NULL, this );

    case 'h': // Html content
    {
      string articleText = "<div class=\"sdct_h\">" + string( resource, size ) + "</div>";

      return ( QString::fromUtf8( articleText.c_str() )
               .replace( QRegExp( "(<\\s*img\\s+[^>]*src\\s*=\\s*[\"']+)((?!data:)[^\"']*)", Qt::CaseInsensitive ),
                         "\\1bres://" + QString::fromStdString( getId() ) + "/\\2" )
               .replace( QRegExp( "(<\\s*link\\s+[^>]*href\\s*=\\s*[\"']+)((?!data:)[^\"']*)", Qt::CaseInsensitive ),
                         "\\1bres://" + QString::fromStdString( getId() ) + "/\\2" )
               .toUtf8().data() );
    }

    case 'm': // Pure meaning, usually means preformatted text
      return "<div class=\"sdct_m\">" + Html::preformat( string( resource, size ) ) + "</div>";

    case 'l': // Same as 'm', but not in utf8, instead in current locale's
              // encoding.
              // We just use Qt here, it should know better about system's
              // locale.
      return "<div class=\"sdct_l\">" + Html::preformat( QString::fromLocal8Bit( resource, size ).toUtf8().data() ) + "</div>";

    case 'g': // Pango markup.
      return "<div class=\"sdct_g\">" + string( resource, size ) + "</div>";

    case 't': // Transcription
      return "<div class=\"sdct_t\">" + Html::escape( string( resource, size ) ) + "</div>";

    case 'y': // Chinese YinBiao or Japanese KANA. Examples are needed. For now,
              // just output as pure escaped utf8.
      return "<div class=\"sdct_y\">" + Html::escape( string( resource, size ) ) + "</div>";

    case 'k': // KingSoft PowerWord data. We don't know how to handle that.
      return "<div class=\"sdct_k\">" + Html::escape( string( resource, size ) ) + "</div>";

    case 'w': // MediaWiki markup. We don't handle this right now.
      return "<div class=\"sdct_w\">" + Html::escape( string( resource, size ) ) + "</div>";

    case 'n': // WordNet data. We don't know anything about it.
      return "<div class=\"sdct_n\">" + Html::escape( string( resource, size ) ) + "</div>";

    case 'r': // Resource file list. For now, resources aren't handled.
      return "<div class=\"sdct_r\">" + Html::escape( string( resource, size ) ) + "</div>";

    case 'W': // An embedded Wav file. Unhandled yet.
      return "<div class=\"sdct_W\">(an embedded .wav file)</div>";

    case 'P': // An embedded picture file. Unhandled yet.
      return "<div class=\"sdct_P\">(an embedded picture file)</div>";
  }

  if ( islower( type ) )
  {
    return string( "<b>Unknown textual entry type " ) + string( 1, type ) + ":</b> " + Html::escape( string( resource, size ) ) + "<br>";
  }
  else
    return string( "<b>Unknown blob entry type " ) + string( 1, type ) + "</b><br>";
}
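
// A short note on the .dict body layout handled below (as implemented here,
// not quoting any external spec): when the .ifo declares a sametypesequence,
// the per-entry type characters are omitted from the data and are taken from
// that string instead, and the last entry's size is implied by the bytes
// remaining. Without it, every entry starts with a type byte: lowercase types
// carry zero-terminated text, uppercase types are prefixed with a big-endian
// uint32_t size.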

void StardictDictionary::loadArticle( uint32_t address,
                                      string & headword,
                                      string & articleText )
{
  uint32_t offset, size;

  getArticleProps( address, headword, offset, size );

  char * articleBody;

  {
    Mutex::Lock _( dzMutex );

    // Note that the function always zero-pads the result.
    articleBody = dict_data_read_( dz, offset, size, 0, 0 );
  }

  if ( !articleBody )
  {
    //throw exCantReadFile( getDictionaryFilenames()[ 2 ] );
    articleText = string( "<div class=\"sdict_m\">DICTZIP error: " ) + dict_error_str( dz ) + "</div>";
    return;
  }

  articleText.clear();

  char * ptr = articleBody;

  if ( sameTypeSequence.size() )
  {
    /// The sequence is known, it's not stored in the article itself
    for( unsigned seq = 0; seq < sameTypeSequence.size(); ++seq )
    {
      // The last entry doesn't have size info -- it is inferred from
      // the bytes left
      bool entrySizeKnown = ( seq == sameTypeSequence.size() - 1 );

      uint32_t entrySize = 0;

      if ( entrySizeKnown )
        entrySize = size;
      else
      if ( !size )
      {
        FDPRINTF( stderr, "Warning: short entry for the word %s encountered.\n", headword.c_str() );
        break;
      }

      char type = sameTypeSequence[ seq ];

      if ( islower( type ) )
      {
        // Zero-terminated entry, unless it's the last one
        if ( !entrySizeKnown )
          entrySize = strlen( ptr );

        if ( size < entrySize )
        {
          FDPRINTF( stderr, "Warning: malformed entry for the word %s encountered.\n", headword.c_str() );
          break;
        }

        articleText += handleResource( type, ptr, entrySize );

        if ( !entrySizeKnown )
          ++entrySize; // Need to skip the zero byte

        ptr += entrySize;
        size -= entrySize;
      }
      else
      if ( isupper( type ) )
      {
        // An entry which has its size before its contents, unless it's the last one

        if ( !entrySizeKnown )
        {
          if ( size < sizeof( uint32_t ) )
          {
            FDPRINTF( stderr, "Warning: malformed entry for the word %s encountered.\n", headword.c_str() );
            break;
          }

          memcpy( &entrySize, ptr, sizeof( uint32_t ) );

          entrySize = ntohl( entrySize );

          ptr += sizeof( uint32_t );
          size -= sizeof( uint32_t );
        }

        if ( size < entrySize )
        {
          FDPRINTF( stderr, "Warning: malformed entry for the word %s encountered.\n", headword.c_str() );
          break;
        }

        articleText += handleResource( type, ptr, entrySize );

        ptr += entrySize;
        size -= entrySize;
      }
      else
      {
        FDPRINTF( stderr, "Warning: non-alpha entry type 0x%x for the word %s encountered.\n",
                  type, headword.c_str() );
        break;
      }
    }
  }
  else
  {
    // The sequence is stored in each article separately
    while( size )
    {
      if ( islower( *ptr ) )
      {
        // Zero-terminated entry
        size_t len = strlen( ptr + 1 );

        if ( size < len + 2 )
        {
          FDPRINTF( stderr, "Warning: malformed entry for the word %s encountered.\n", headword.c_str() );
          break;
        }

        articleText += handleResource( *ptr, ptr + 1, len );

        ptr += len + 2;
        size -= len + 2;
      }
      else
      if ( isupper( *ptr ) )
      {
        // An entry which has its size before its contents
        if ( size < sizeof( uint32_t ) + 1 )
        {
          FDPRINTF( stderr, "Warning: malformed entry for the word %s encountered.\n", headword.c_str() );
          break;
        }

        uint32_t entrySize;

        memcpy( &entrySize, ptr + 1, sizeof( uint32_t ) );

        entrySize = ntohl( entrySize );

        if ( size < sizeof( uint32_t ) + 1 + entrySize )
        {
          FDPRINTF( stderr, "Warning: malformed entry for the word %s encountered.\n", headword.c_str() );
          break;
        }

        articleText += handleResource( *ptr, ptr + 1 + sizeof( uint32_t ), entrySize );

        ptr += sizeof( uint32_t ) + 1 + entrySize;
        size -= sizeof( uint32_t ) + 1 + entrySize;
      }
      else
      {
        FDPRINTF( stderr, "Warning: non-alpha entry type 0x%x for the word %s encountered.\n",
                  (unsigned)*ptr, headword.c_str() );
        break;
      }
    }
  }

  free( articleBody );
}

QString const & StardictDictionary::getDescription()
{
  if ( !dictionaryDescription.isEmpty() )
    return dictionaryDescription;

  File::Class ifoFile( getDictionaryFilenames()[ 0 ], "r" );
  Ifo ifo( ifoFile );

  if ( !ifo.copyright.empty() )
    dictionaryDescription += "Copyright: " + QString::fromUtf8( ifo.copyright.c_str() ) + "\n\n";

  if ( !ifo.author.empty() )
    dictionaryDescription += "Author: " + QString::fromUtf8( ifo.author.c_str() ) + "\n\n";

  if ( !ifo.email.empty() )
    dictionaryDescription += "E-mail: " + QString::fromUtf8( ifo.email.c_str() ) + "\n\n";

  if ( !ifo.description.empty() )
  {
    QString desc = QString::fromUtf8( ifo.description.c_str() );
    desc.replace( "\t", "<br/>" );
    desc.replace( "\\n", "<br/>" );
    dictionaryDescription += Html::unescape( desc );
  }

  if ( dictionaryDescription.isEmpty() )
    dictionaryDescription = "NONE";

  return dictionaryDescription;
}
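
// The three request types below (headwords, article, resource) all follow the
// same pattern: each request immediately schedules a companion QRunnable on
// the global QThreadPool; cancel() bumps an atomic flag that the worker polls,
// and the request's destructor marks itself cancelled and then waits on the
// hasExited semaphore, which the runnable releases from its destructor, so a
// request never goes away while its worker is still running.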

/// StardictDictionary::findHeadwordsForSynonym()

class StardictHeadwordsRequest;

class StardictHeadwordsRequestRunnable: public QRunnable
{
  StardictHeadwordsRequest & r;
  QSemaphore & hasExited;

public:

  StardictHeadwordsRequestRunnable( StardictHeadwordsRequest & r_,
                                    QSemaphore & hasExited_ ): r( r_ ),
                                                               hasExited( hasExited_ )
  {}

  ~StardictHeadwordsRequestRunnable()
  {
    hasExited.release();
  }

  virtual void run();
};

class StardictHeadwordsRequest: public Dictionary::WordSearchRequest
{
  friend class StardictHeadwordsRequestRunnable;

  wstring word;
  StardictDictionary & dict;

  QAtomicInt isCancelled;
  QSemaphore hasExited;

public:

  StardictHeadwordsRequest( wstring const & word_,
                            StardictDictionary & dict_ ):
    word( word_ ), dict( dict_ )
  {
    QThreadPool::globalInstance()->start(
      new StardictHeadwordsRequestRunnable( *this, hasExited ) );
  }

  void run(); // Run from another thread by StardictHeadwordsRequestRunnable

  virtual void cancel()
  {
    isCancelled.ref();
  }

  ~StardictHeadwordsRequest()
  {
    isCancelled.ref();
    hasExited.acquire();
  }
};

void StardictHeadwordsRequestRunnable::run()
{
  r.run();
}

void StardictHeadwordsRequest::run()
{
  if ( isCancelled )
  {
    finish();
    return;
  }

  try
  {
    vector< WordArticleLink > chain = dict.findArticles( word );

    wstring caseFolded = Folding::applySimpleCaseOnly( word );

    for( unsigned x = 0; x < chain.size(); ++x )
    {
      if ( isCancelled )
      {
        finish();
        return;
      }

      string headword, articleText;

      dict.loadArticle( chain[ x ].articleOffset,
                        headword, articleText );

      wstring headwordDecoded = Utf8::decode( headword );

      if ( caseFolded != Folding::applySimpleCaseOnly( headwordDecoded ) )
      {
        // The headword seems to differ from the input word, which makes the
        // input word its synonym.
        Mutex::Lock _( dataMutex );

        matches.push_back( headwordDecoded );
      }
    }
  }
  catch( std::exception & e )
  {
    setErrorString( QString::fromUtf8( e.what() ) );
  }

  finish();
}

sptr< Dictionary::WordSearchRequest >
  StardictDictionary::findHeadwordsForSynonym( wstring const & word )
  throw( std::exception )
{
  return new StardictHeadwordsRequest( word, *this );
}

/// StardictDictionary::getArticle()

class StardictArticleRequest;

class StardictArticleRequestRunnable: public QRunnable
{
  StardictArticleRequest & r;
  QSemaphore & hasExited;

public:

  StardictArticleRequestRunnable( StardictArticleRequest & r_,
                                  QSemaphore & hasExited_ ): r( r_ ),
                                                             hasExited( hasExited_ )
  {}

  ~StardictArticleRequestRunnable()
  {
    hasExited.release();
  }

  virtual void run();
};

class StardictArticleRequest: public Dictionary::DataRequest
{
  friend class StardictArticleRequestRunnable;

  wstring word;
  vector< wstring > alts;
  StardictDictionary & dict;

  QAtomicInt isCancelled;
  QSemaphore hasExited;

public:

  StardictArticleRequest( wstring const & word_,
                          vector< wstring > const & alts_,
                          StardictDictionary & dict_ ):
    word( word_ ), alts( alts_ ), dict( dict_ )
  {
    QThreadPool::globalInstance()->start(
      new StardictArticleRequestRunnable( *this, hasExited ) );
  }

  void run(); // Run from another thread by StardictArticleRequestRunnable

  virtual void cancel()
  {
    isCancelled.ref();
  }

  ~StardictArticleRequest()
  {
    isCancelled.ref();
    hasExited.acquire();
  }
};

void StardictArticleRequestRunnable::run()
{
  r.run();
}

void StardictArticleRequest::run()
{
  if ( isCancelled )
  {
    finish();
    return;
  }

  try
  {
    vector< WordArticleLink > chain = dict.findArticles( word );

    for( unsigned x = 0; x < alts.size(); ++x )
    {
      /// Make an additional query for each alt
      vector< WordArticleLink > altChain = dict.findArticles( alts[ x ] );

      chain.insert( chain.end(), altChain.begin(), altChain.end() );
    }

    multimap< wstring, pair< string, string > > mainArticles, alternateArticles;

    set< uint32_t > articlesIncluded; // Some synonyms cause the same article to
                                      // appear several times. We combat this
                                      // by only allowing it to appear once.

    wstring wordCaseFolded = Folding::applySimpleCaseOnly( word );

    for( unsigned x = 0; x < chain.size(); ++x )
    {
      if ( isCancelled )
      {
        finish();
        return;
      }

      if ( articlesIncluded.find( chain[ x ].articleOffset ) != articlesIncluded.end() )
        continue; // We already have this article in the body.

      // Now grab that article

      string headword, articleText;

      dict.loadArticle( chain[ x ].articleOffset, headword, articleText );

      // Ok. Now, does it go to main articles, or to alternate ones? We list
      // main ones first, and alternates after.

      // We do the case-folded comparison here.

      wstring headwordStripped =
        Folding::applySimpleCaseOnly( Utf8::decode( headword ) );

      multimap< wstring, pair< string, string > > & mapToUse =
        ( wordCaseFolded == headwordStripped ) ?
          mainArticles : alternateArticles;

      mapToUse.insert( pair< wstring, pair< string, string > >(
        Folding::applySimpleCaseOnly( Utf8::decode( headword ) ),
        pair< string, string >( headword, articleText ) ) );

      articlesIncluded.insert( chain[ x ].articleOffset );
    }

    if ( mainArticles.empty() && alternateArticles.empty() )
    {
      // No such word
      finish();
      return;
    }

    string result;

    multimap< wstring, pair< string, string > >::const_iterator i;

    string cleaner = "</font>""</font>""</font>""</font>""</font>""</font>"
                     "</font>""</font>""</font>""</font>""</font>""</font>"
                     "</b></b></b></b></b></b></b></b>"
                     "</i></i></i></i></i></i></i></i>";

    for( i = mainArticles.begin(); i != mainArticles.end(); ++i )
    {
      result += "<h3>";
      result += i->second.first;
      result += "</h3>";
      result += i->second.second;
      result += cleaner;
    }

    for( i = alternateArticles.begin(); i != alternateArticles.end(); ++i )
    {
      result += "<h3>";
      result += i->second.first;
      result += "</h3>";
      result += i->second.second;
      result += cleaner;
    }

    result = QString::fromUtf8( result.c_str() )
              .replace( QRegExp( "(<\\s*a\\s+[^>]*href\\s*=\\s*[\"']\\s*)bword://", Qt::CaseInsensitive ),
                        "\\1bword:" )
              .toUtf8().data();

    Mutex::Lock _( dataMutex );

    data.resize( result.size() );

    memcpy( &data.front(), result.data(), result.size() );

    hasAnyData = true;
  }
  catch( std::exception & e )
  {
    setErrorString( QString::fromUtf8( e.what() ) );
  }

  finish();
}

sptr< Dictionary::DataRequest > StardictDictionary::getArticle( wstring const & word,
                                                                vector< wstring > const & alts,
                                                                wstring const & )
  throw( std::exception )
{
  return new StardictArticleRequest( word, alts, *this );
}

static char const * beginsWith( char const * substr, char const * str )
{
  size_t len = strlen( substr );

  return strncmp( str, substr, len ) == 0 ? str + len : 0;
}
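
// The .ifo file parsed below is plain text: a "StarDict's dict ifo file"
// magic line, a version= line, and then one key=value option per line --
// which is why the parser is just a gets() loop feeding beginsWith().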

Ifo::Ifo( File::Class & f ):
  wordcount( 0 ), synwordcount( 0 ), idxfilesize( 0 ), idxoffsetbits( 32 )
{
  static string const versionEq( "version=" );
  static string const booknameEq( "bookname=" );

  //DPRINTF( "%s<\n", f.gets().c_str() );
  //DPRINTF( "%s<\n", f.gets().c_str() );

  if ( QString::fromUtf8( f.gets().c_str() ) != "StarDict's dict ifo file" ||
       f.gets().compare( 0, versionEq.size(), versionEq ) )
    throw exNotAnIfoFile();

  /// Now go through the file and parse options
  try
  {
    char option[ 16384 ];

    for( ; ; )
    {
      if ( !f.gets( option, sizeof( option ), true ) )
        break;

      if ( char const * val = beginsWith( "bookname=", option ) )
        bookname = val;
      else
      if ( char const * val = beginsWith( "wordcount=", option ) )
      {
        if ( sscanf( val, "%u", &wordcount ) != 1 )
          throw exBadFieldInIfo( option );
      }
      else
      if ( char const * val = beginsWith( "synwordcount=", option ) )
      {
        if ( sscanf( val, "%u", &synwordcount ) != 1 )
          throw exBadFieldInIfo( option );
      }
      else
      if ( char const * val = beginsWith( "idxfilesize=", option ) )
      {
        if ( sscanf( val, "%u", &idxfilesize ) != 1 )
          throw exBadFieldInIfo( option );
      }
      else
      if ( char const * val = beginsWith( "idxoffsetbits=", option ) )
      {
        if ( sscanf( val, "%u", &idxoffsetbits ) != 1 || ( idxoffsetbits != 32
                                                           && idxoffsetbits != 64 ) )
          throw exBadFieldInIfo( option );
      }
      else
      if ( char const * val = beginsWith( "sametypesequence=", option ) )
        sametypesequence = val;
      else
      if ( char const * val = beginsWith( "dicttype=", option ) )
        dicttype = val;
      else
      if ( char const * val = beginsWith( "description=", option ) )
        description = val;
      else
      if ( char const * val = beginsWith( "copyright=", option ) )
        copyright = val;
      else
      if ( char const * val = beginsWith( "author=", option ) )
        author = val;
      else
      if ( char const * val = beginsWith( "email=", option ) )
        email = val;
    }
  }
  catch( File::exReadError & )
  {
  }
}

} // anonymous namespace

static void findCorrespondingFiles( string const & ifo,
                                    string & idx, string & dict, string & syn )
{
  string base( ifo, 0, ifo.size() - 3 );

  if ( !(
         File::tryPossibleName( base + "idx", idx ) ||
         File::tryPossibleName( base + "idx.gz", idx ) ||
         File::tryPossibleName( base + "idx.dz", idx ) ||
         File::tryPossibleName( base + "IDX", idx ) ||
         File::tryPossibleName( base + "IDX.GZ", idx ) ||
         File::tryPossibleName( base + "IDX.DZ", idx )
      ) )
    throw exNoIdxFile( ifo );

  if ( !(
         File::tryPossibleName( base + "dict", dict ) ||
         File::tryPossibleName( base + "dict.dz", dict ) ||
         File::tryPossibleName( base + "DICT", dict ) ||
         File::tryPossibleName( base + "dict.DZ", dict )
      ) )
    throw exNoDictFile( ifo );

  if ( !(
         File::tryPossibleName( base + "syn", syn ) ||
         File::tryPossibleName( base + "syn.gz", syn ) ||
         File::tryPossibleName( base + "syn.dz", syn ) ||
         File::tryPossibleName( base + "SYN", syn ) ||
         File::tryPossibleName( base + "SYN.GZ", syn ) ||
         File::tryPossibleName( base + "SYN.DZ", syn )
      ) )
    syn.clear();
}
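
// The .idx and .syn files read below share a simple binary layout (as this
// parser expects it): a zero-terminated UTF-8 headword followed by
// network-order 32-bit integers -- the article's offset and size in the
// .dict file for .idx entries, or the ordinal of the corresponding .idx entry
// for .syn entries. Either file may also be gzip-compressed, hence
// gd_gzopen()/gzread().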

static void handleIdxSynFile( string const & fileName,
                              IndexedWords & indexedWords,
                              ChunkedStorage::Writer & chunks,
                              vector< uint32_t > * articleOffsets,
                              bool isSynFile )
{
  gzFile stardictIdx = gd_gzopen( fileName.c_str() );
  if ( !stardictIdx )
    throw exCantReadFile( fileName );

  vector< char > image;

  for( ; ; )
  {
    size_t oldSize = image.size();

    image.resize( oldSize + 65536 );

    int rd = gzread( stardictIdx, &image.front() + oldSize, 65536 );

    if ( rd < 0 )
    {
      gzclose( stardictIdx );
      throw exCantReadFile( fileName );
    }

    if ( rd != 65536 )
    {
      image.resize( oldSize + rd + 1 );
      break;
    }
  }
  gzclose( stardictIdx );

  // We append one zero byte to catch a runaway string at the end, if any
  image.back() = 0;

  // Now parse it
  for( char const * ptr = &image.front(); ptr != &image.back(); )
  {
    size_t wordLen = strlen( ptr );

    if ( ptr + wordLen + 1 + ( isSynFile ? sizeof( uint32_t ) :
                                           sizeof( uint32_t ) * 2 ) >
         &image.back() )
    {
      FDPRINTF( stderr, "Warning: sudden end of file %s\n", fileName.c_str() );
      break;
    }

    char const * word = ptr;

    ptr += wordLen + 1;

    uint32_t offset;

    if ( strstr( word, "&#" ) )
    {
      // Decode some html-coded symbols in the headword
      string unescapedWord = Html::unescapeUtf8( word );
      strncpy( (char *)word, unescapedWord.c_str(), wordLen );
      wordLen = strlen( word );
    }

    if ( !isSynFile )
    {
      // We're processing the .idx file
      uint32_t articleOffset, articleSize;

      memcpy( &articleOffset, ptr, sizeof( uint32_t ) );
      ptr += sizeof( uint32_t );
      memcpy( &articleSize, ptr, sizeof( uint32_t ) );
      ptr += sizeof( uint32_t );

      articleOffset = ntohl( articleOffset );
      articleSize = ntohl( articleSize );

      // Create an entry for the article in the chunked storage

      offset = chunks.startNewBlock();

      if ( articleOffsets )
        articleOffsets->push_back( offset );

      chunks.addToBlock( &articleOffset, sizeof( uint32_t ) );
      chunks.addToBlock( &articleSize, sizeof( uint32_t ) );
      chunks.addToBlock( word, wordLen + 1 );
    }
    else
    {
      // We're processing the .syn file
      uint32_t offsetInIndex;

      memcpy( &offsetInIndex, ptr, sizeof( uint32_t ) );
      ptr += sizeof( uint32_t );

      offsetInIndex = ntohl( offsetInIndex );

      if ( offsetInIndex >= articleOffsets->size() )
        throw exIncorrectOffset( fileName );

      offset = (*articleOffsets)[ offsetInIndex ];

      // Some StarDict dictionaries are in fact badly converted Babylon ones.
      // They contain a lot of superfluous slashed entries with dollar signs.
      // We try to filter them out here, since those entries become much more
      // apparent in GoldenDict than they were in StarDict because of
      // punctuation folding. Hopefully there are not a whole lot of valid
      // synonyms which really start with a slash and contain dollar signs, or
      // end with a dollar sign and contain slashes.
      if ( *word == '/' )
      {
        if ( strchr( word, '$' ) )
          continue; // Skip this entry
      }
      else
      if ( wordLen && word[ wordLen - 1 ] == '$' )
      {
        if ( strchr( word, '/' ) )
          continue; // Skip this entry
      }
    }

    // Insert the new entry into the index
    indexedWords.addWord( Utf8::decode( word ), offset );
  }

  DPRINTF( "%u entries made\n", (unsigned) indexedWords.size() );
}
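
// Resources referenced from articles (images, stylesheets and the like) are
// served by the request below from a "res" subdirectory next to the
// dictionary's .ifo file; TIFF images are recoded to BMP in memory before
// being returned.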

//// StardictDictionary::getResource()

class StardictResourceRequest;

class StardictResourceRequestRunnable: public QRunnable
{
  StardictResourceRequest & r;
  QSemaphore & hasExited;

public:

  StardictResourceRequestRunnable( StardictResourceRequest & r_,
                                   QSemaphore & hasExited_ ): r( r_ ),
                                                              hasExited( hasExited_ )
  {}

  ~StardictResourceRequestRunnable()
  {
    hasExited.release();
  }

  virtual void run();
};

class StardictResourceRequest: public Dictionary::DataRequest
{
  friend class StardictResourceRequestRunnable;

  StardictDictionary & dict;

  string resourceName;

  QAtomicInt isCancelled;
  QSemaphore hasExited;

public:

  StardictResourceRequest( StardictDictionary & dict_,
                           string const & resourceName_ ):
    dict( dict_ ),
    resourceName( resourceName_ )
  {
    QThreadPool::globalInstance()->start(
      new StardictResourceRequestRunnable( *this, hasExited ) );
  }

  void run(); // Run from another thread by StardictResourceRequestRunnable

  virtual void cancel()
  {
    isCancelled.ref();
  }

  ~StardictResourceRequest()
  {
    isCancelled.ref();
    hasExited.acquire();
  }
};

void StardictResourceRequestRunnable::run()
{
  r.run();
}

void StardictResourceRequest::run()
{
  // Some runnables linger long enough to be cancelled before they start
  if ( isCancelled )
  {
    finish();
    return;
  }

  try
  {
    if ( resourceName.at( 0 ) == '\x1E' )
      resourceName = resourceName.erase( 0, 1 );
    if ( resourceName.at( resourceName.length() - 1 ) == '\x1F' )
      resourceName.erase( resourceName.length() - 1, 1 );

    string n =
      FsEncoding::dirname( dict.getDictionaryFilenames()[ 0 ] ) +
      FsEncoding::separator() +
      "res" +
      FsEncoding::separator() +
      FsEncoding::encode( resourceName );

    DPRINTF( "n is %s\n", n.c_str() );

    {
      Mutex::Lock _( dataMutex );

      File::loadFromFile( n, data );
    }

    if ( Filetype::isNameOfTiff( resourceName ) )
    {
      // Convert it

      dataMutex.lock();

      QImage img = QImage::fromData( (unsigned char *) &data.front(),
                                     data.size() );

      dataMutex.unlock();

      if ( !img.isNull() )
      {
        // Managed to load -- now store it back as BMP

        QByteArray ba;
        QBuffer buffer( &ba );
        buffer.open( QIODevice::WriteOnly );
        img.save( &buffer, "BMP" );

        Mutex::Lock _( dataMutex );

        data.resize( buffer.size() );

        memcpy( &data.front(), buffer.data(), data.size() );
      }
    }

    Mutex::Lock _( dataMutex );

    hasAnyData = true;
  }
  catch( File::Ex & )
  {
    // No such resource -- we don't set the hasAnyData flag then
  }
  catch( Utf8::exCantDecode )
  {
    // Failed to decode some utf8 -- probably the resource name is no good
  }
  catch( ... )
  {
  }

  finish();
}

sptr< Dictionary::DataRequest > StardictDictionary::getResource( string const & name )
  throw( std::exception )
{
  return new StardictResourceRequest( *this, name );
}
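
// makeDictionaries() is the entry point for this module: it scans the supplied
// file names for .ifo files, rebuilds GoldenDict's own index file in
// indicesDir whenever it is missing or outdated, and then instantiates a
// StardictDictionary over that index for each dictionary found.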

vector< sptr< Dictionary::Class > > makeDictionaries(
                                      vector< string > const & fileNames,
                                      string const & indicesDir,
                                      Dictionary::Initializing & initializing )
  throw( std::exception )
{
  vector< sptr< Dictionary::Class > > dictionaries;

  for( vector< string >::const_iterator i = fileNames.begin(); i != fileNames.end();
       ++i )
  {
    if ( i->size() < 4 ||
         strcasecmp( i->c_str() + ( i->size() - 4 ), ".ifo" ) != 0 )
      continue;

    try
    {
      vector< string > dictFiles( 1, *i );

      string idxFileName, dictFileName, synFileName;

      findCorrespondingFiles( *i, idxFileName, dictFileName, synFileName );

      dictFiles.push_back( idxFileName );
      dictFiles.push_back( dictFileName );

      if ( synFileName.size() )
        dictFiles.push_back( synFileName );

      string dictId = Dictionary::makeDictionaryId( dictFiles );

      string indexFile = indicesDir + dictId;

      if ( Dictionary::needToRebuildIndex( dictFiles, indexFile ) ||
           indexIsOldOrBad( indexFile ) )
      {
        // Building the index

        File::Class ifoFile( *i, "r" );

        Ifo ifo( ifoFile );

        if ( ifo.idxoffsetbits == 64 )
          throw ex64BitsNotSupported();

        if ( ifo.dicttype.size() )
          throw exDicttypeNotSupported();

        if ( synFileName.empty() )
        {
          if ( ifo.synwordcount )
          {
            DPRINTF( "Warning: dictionary has synwordcount specified, but no "
                     "corresponding .syn file was found\n" );
            ifo.synwordcount = 0; // Pretend it wasn't there
          }
        }
        else
        if ( !ifo.synwordcount )
        {
          DPRINTF( "Warning: ignoring .syn file %s, since no synwordcount is specified in the .ifo\n",
                   synFileName.c_str() );
        }

        DPRINTF( "bookname = %s\n", ifo.bookname.c_str() );
        DPRINTF( "wordcount = %u\n", ifo.wordcount );

        initializing.indexingDictionary( ifo.bookname );

        File::Class idx( indexFile, "wb" );

        IdxHeader idxHeader;

        memset( &idxHeader, 0, sizeof( idxHeader ) );

        // We write a dummy header first. At the end of the process the header
        // will be rewritten with the right values.

        idx.write( idxHeader );

        idx.write( ifo.bookname.data(), ifo.bookname.size() );
        idx.write( ifo.sametypesequence.data(), ifo.sametypesequence.size() );

        IndexedWords indexedWords;

        ChunkedStorage::Writer chunks( idx );

        // Load indices
        if ( !ifo.synwordcount )
          handleIdxSynFile( idxFileName, indexedWords, chunks, 0, false );
        else
        {
          vector< uint32_t > articleOffsets;

          articleOffsets.reserve( ifo.wordcount );

          handleIdxSynFile( idxFileName, indexedWords, chunks, &articleOffsets,
                            false );

          handleIdxSynFile( synFileName, indexedWords, chunks, &articleOffsets,
                            true );
        }

        // Finish with the chunks

        idxHeader.chunksOffset = chunks.finish();

        // Build the B-tree index

        IndexInfo idxInfo = BtreeIndexing::buildIndex( indexedWords, idx );

        idxHeader.indexBtreeMaxElements = idxInfo.btreeMaxElements;
        idxHeader.indexRootOffset = idxInfo.rootOffset;

        // That concludes it. Update the header.

        idxHeader.signature = Signature;
        idxHeader.formatVersion = CurrentFormatVersion;

        idxHeader.wordCount = ifo.wordcount;
        idxHeader.synWordCount = ifo.synwordcount;
        idxHeader.bookNameSize = ifo.bookname.size();
        idxHeader.sameTypeSequenceSize = ifo.sametypesequence.size();

        // Read the languages
        QPair< quint32, quint32 > langs =
          LangCoder::findIdsForFilename( QString::fromStdString( dictFileName ) );

        // If no languages were found, try the dictionary's name
        if ( langs.first == 0 || langs.second == 0 )
        {
          langs =
            LangCoder::findIdsForFilename( QString::fromStdString( ifo.bookname ) );
        }

        idxHeader.langFrom = langs.first;
        idxHeader.langTo = langs.second;

        idx.rewind();

        idx.write( &idxHeader, sizeof( idxHeader ) );
      }

      dictionaries.push_back( new StardictDictionary( dictId,
                                                      indexFile,
                                                      dictFiles ) );
    }
    catch( std::exception & e )
    {
      FDPRINTF( stderr, "Stardict dictionary reading failed: %s, error: %s\n",
                i->c_str(), e.what() );
    }
  }

  return dictionaries;
}

} // namespace Stardict
|