/* This file is (c) 2008-2009 Konstantin Isakov
* Part of GoldenDict. Licensed under GPLv3 or later, see the LICENSE file */
#include "dsl.hh"
#include "dsl_details.hh"
#include "btreeidx.hh"
#include "folding.hh"
#include "utf8.hh"
#include "chunkedstorage.hh"
#include "dictzip.h"
#include "htmlescape.hh"
#include "iconv.hh"
#include "filetype.hh"
#include "fsencoding.hh"
#include "audiolink.hh"
#include <zip.h>
#include <map>
#include <set>
#include <list>
#include <string>
#include <wctype.h>

#include <QSemaphore>
#include <QThreadPool>
#include <QRunnable>
#include <QAtomicInt>
#include <QUrl>
#include <QImage>
#include <QBuffer>

string DslDictionary::dslToHtml( wstring const & str )
{
  ArticleDom dom( str );

  string html = processNodeChildren( dom.root );

  // Line breaks in DSL separate paragraphs, so wrap each line in <p></p>.
  for( size_t x = html.size(); x--; )
    if ( html[ x ] == '\n' )
      html.insert( x + 1, "</p><p>" );

  return "<p>" + html + "</p>";
}
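// processNodeChildren() walks all children of a parsed ArticleDom node, while
// nodeToHtml() renders a single node: plain text is HTML-escaped as-is, and
// known DSL tags ([b], [i], [c], [m#], [s], [ref], ...) are mapped to
// corresponding HTML elements, mostly spans and divs carrying dsl_* classes.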
string DslDictionary::processNodeChildren( ArticleDom::Node const & node )
{
string result;
for( ArticleDom::Node::const_iterator i = node.begin(); i != node.end();
++i )
result += nodeToHtml( *i );
return result;
}
string DslDictionary::nodeToHtml( ArticleDom::Node const & node )
{
if ( !node.isTag )
return Html::escape( Utf8::encode( node.text ) );
string result;
  if ( node.tagName == L"b" )
    result += "<b class=\"dsl_b\">" + processNodeChildren( node ) + "</b>";
  else
  if ( node.tagName == L"i" )
    result += "<i class=\"dsl_i\">" + processNodeChildren( node ) + "</i>";
  else
  if ( node.tagName == L"u" )
    result += "<span class=\"dsl_u\">" + processNodeChildren( node ) + "</span>";
  else
  if ( node.tagName == L"c" )
  {
    result += "<span class=\"dsl_c\">" + processNodeChildren( node ) + "</span>";
  }
  else
  if ( node.tagName == L"*" )
    result += "<span class=\"dsl_opt\">" + processNodeChildren( node ) + "</span>";
  else
  if ( node.tagName.size() == 2 && node.tagName[ 0 ] == L'm' &&
       iswdigit( node.tagName[ 1 ] ) )
    result += "<div class=\"dsl_" + Utf8::encode( node.tagName ) + "\">"
              + processNodeChildren( node ) + "</div>";
  else
  if ( node.tagName == L"trn" )
    result += "<span class=\"dsl_trn\">" + processNodeChildren( node ) + "</span>";
  else
  if ( node.tagName == L"ex" )
    result += "<span class=\"dsl_ex\">" + processNodeChildren( node ) + "</span>";
  else
  if ( node.tagName == L"com" )
    result += "<span class=\"dsl_com\">" + processNodeChildren( node ) + "</span>";
else
if ( node.tagName == L"s" )
{
string filename = Utf8::encode( node.renderAsText() );
if ( Filetype::isNameOfSound( filename ) )
{
      // If the file is present next to the dictionary, reference this
      // dictionary directly. Otherwise, fall back to a global 'search'
      // reference.
string n =
FsEncoding::dirname( getDictionaryFilenames()[ 0 ] ) +
FsEncoding::separator() +
FsEncoding::encode( filename );
bool search = true;
try
{
try
{
File::Class f( n, "rb" );
}
catch( File::exCantOpen & )
{
n = getDictionaryFilenames()[ 0 ] + ".files" +
FsEncoding::separator() +
FsEncoding::encode( filename );
try
{
File::Class f( n, "rb" );
}
catch( File::exCantOpen & )
{
// Try zip file
if ( resourceZip )
{
string fname = FsEncoding::encode( filename );
              int fileIndex = zip_name_locate( resourceZip, fname.c_str(), 0 );
              if ( fileIndex == -1 )
throw;
}
else
throw;
}
}
search = false;
}
catch( File::Ex & )
{
}
QUrl url;
url.setScheme( "gdau" );
url.setHost( QString::fromUtf8( search ? "search" : getId().c_str() ) );
url.setPath( QString::fromUtf8( filename.c_str() ) );
string ref = string( "\"" ) + url.toEncoded().data() + "\"";
result += addAudioLink( ref );
      result += "<span class=\"dsl_s_wav\"><a href=" + ref + ">"
                "<img src=\"qrcx://localhost/icons/playsound.png\" border=\"0\" alt=\"Play\"/>"
                "</a></span>";
}
else
if ( Filetype::isNameOfPicture( filename ) )
{
QUrl url;
url.setScheme( "bres" );
url.setHost( QString::fromUtf8( getId().c_str() ) );
url.setPath( QString::fromUtf8( filename.c_str() ) );
      result += string( "<img src=\"" ) + url.toEncoded().data() + "\" alt=\"" +
                Html::escape( filename ) + "\"/>";
}
else
{
// Unknown file type, downgrade to a hyperlink
QUrl url;
url.setScheme( "bres" );
url.setHost( QString::fromUtf8( getId().c_str() ) );
url.setPath( QString::fromUtf8( filename.c_str() ) );
      result += string( "<a href=\"" ) + url.toEncoded().data() + "\">" +
                processNodeChildren( node ) + "</a>";
}
}
else
if ( node.tagName == L"url" )
    result += "<a href=\"" + Html::escape( Utf8::encode( node.renderAsText() ) ) + "\">"
              + processNodeChildren( node ) + "</a>";
else
if ( node.tagName == L"!trs" )
    result += "<span class=\"dsl_trs\">" + processNodeChildren( node ) + "</span>";
else
if ( node.tagName == L"p" )
{
    result += "<span class=\"dsl_p\"";

    string val = Utf8::encode( node.renderAsText() );

    // If we know this abbreviation, show its expansion in a tooltip.
    map< string, string >::const_iterator i = abrv.find( val );

    if ( i != abrv.end() )
      result += " title=\"" + Html::escape( i->second ) + "\"";

    result += ">" + processNodeChildren( node ) + "</span>";
}
else
if ( node.tagName == L"'" )
{
    result += "<span class=\"dsl_stress\">" + processNodeChildren( node ) +
              Utf8::encode( wstring( 1, 0x301 ) ) + "</span>";
}
else
if ( node.tagName == L"lang" )
{
    result += "<span class=\"dsl_lang\">" + processNodeChildren( node ) + "</span>";
}
else
if ( node.tagName == L"ref" )
{
    result += "<a href=\"bword:" + Html::escape( Utf8::encode( node.renderAsText() ) ) + "\">"
              + processNodeChildren( node ) + "</a>";
}
else
if ( node.tagName == L"sub" )
{
    result += "<sub>" + processNodeChildren( node ) + "</sub>";
}
else
if ( node.tagName == L"sup" )
{
    result += "<sup>" + processNodeChildren( node ) + "</sup>";
}
else
if ( node.tagName == L"t" )
{
    result += "<span class=\"dsl_t\">" + processNodeChildren( node ) + "</span>";
}
else
    result += "<span class=\"dsl_unknown\">" + processNodeChildren( node ) + "</span>";
return result;
}
#if 0
vector< wstring > StardictDictionary::findHeadwordsForSynonym( wstring const & str )
throw( std::exception )
{
vector< wstring > result;
vector< WordArticleLink > chain = findArticles( str );
wstring caseFolded = Folding::applySimpleCaseOnly( str );
for( unsigned x = 0; x < chain.size(); ++x )
{
string headword, articleText;
loadArticle( chain[ x ].articleOffset,
headword, articleText );
wstring headwordDecoded = Utf8::decode( headword );
if ( caseFolded != Folding::applySimpleCaseOnly( headwordDecoded ) )
{
// The headword seems to differ from the input word, which makes the
// input word its synonym.
result.push_back( headwordDecoded );
}
}
return result;
}
#endif
/// DslDictionary::getArticle()
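// Lookups run asynchronously: DslArticleRequest schedules a
// DslArticleRequestRunnable on the global QThreadPool; the runnable releases
// the hasExited semaphore from its destructor, and the request's destructor
// acquires it, so destroying a request blocks until its worker has finished.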
class DslArticleRequest;
class DslArticleRequestRunnable: public QRunnable
{
DslArticleRequest & r;
QSemaphore & hasExited;
public:
DslArticleRequestRunnable( DslArticleRequest & r_,
QSemaphore & hasExited_ ): r( r_ ),
hasExited( hasExited_ )
{}
~DslArticleRequestRunnable()
{
hasExited.release();
}
virtual void run();
};
class DslArticleRequest: public Dictionary::DataRequest
{
friend class DslArticleRequestRunnable;
wstring word;
vector< wstring > alts;
DslDictionary & dict;
QAtomicInt isCancelled;
QSemaphore hasExited;
public:
DslArticleRequest( wstring const & word_,
vector< wstring > const & alts_,
DslDictionary & dict_ ):
word( word_ ), alts( alts_ ), dict( dict_ )
{
QThreadPool::globalInstance()->start(
new DslArticleRequestRunnable( *this, hasExited ) );
}
void run(); // Run from another thread by DslArticleRequestRunnable
virtual void cancel()
{
isCancelled.ref();
}
~DslArticleRequest()
{
isCancelled.ref();
hasExited.acquire();
}
};
void DslArticleRequestRunnable::run()
{
r.run();
}
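// Gathers the articles for the requested word and all of its alternative
// forms, renders each article body to HTML, and emits exact (case-folded)
// headword matches before the alternate ones.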
void DslArticleRequest::run()
{
if ( isCancelled )
{
finish();
return;
}
vector< WordArticleLink > chain = dict.findArticles( word );
for( unsigned x = 0; x < alts.size(); ++x )
{
/// Make an additional query for each alt
vector< WordArticleLink > altChain = dict.findArticles( alts[ x ] );
chain.insert( chain.end(), altChain.begin(), altChain.end() );
}
multimap< wstring, string > mainArticles, alternateArticles;
  set< uint32_t > articlesIncluded; // Some synonyms resolve to the same
                                    // article several times. Track the offsets
                                    // so each article appears only once.
wstring wordCaseFolded = Folding::applySimpleCaseOnly( word );
for( unsigned x = 0; x < chain.size(); ++x )
{
    // Check for cancellation from time to time
if ( isCancelled )
{
finish();
return;
}
if ( articlesIncluded.find( chain[ x ].articleOffset ) != articlesIncluded.end() )
continue; // We already have this article in the body.
// Now grab that article
string headword;
list< wstring > displayedHeadwords;
wstring articleBody;
dict.loadArticle( chain[ x ].articleOffset, headword, displayedHeadwords,
articleBody );
string articleText;
    articleText += "<div class=\"dsl_article\">";
    articleText += "<div class=\"dsl_headwords\">";

    for( list< wstring >::const_iterator i = displayedHeadwords.begin();
         i != displayedHeadwords.end(); ++i )
      articleText += dict.dslToHtml( *i );

    articleText += "</div>";

    if ( displayedHeadwords.size() )
      expandTildes( articleBody, displayedHeadwords.front() );

    articleText += "<div class=\"dsl_definition\">";
    articleText += dict.dslToHtml( articleBody );
    articleText += "</div>";
    articleText += "</div>";

    // Does this article go to the main list or to the alternates? Main ones
    // (exact case-folded headword matches) are listed first, alternates after.
wstring headwordStripped =
Folding::applySimpleCaseOnly( Utf8::decode( headword ) );
multimap< wstring, string > & mapToUse =
( wordCaseFolded == headwordStripped ) ?
mainArticles : alternateArticles;
mapToUse.insert( pair< wstring, string >(
Folding::applySimpleCaseOnly( Utf8::decode( headword ) ),
articleText ) );
articlesIncluded.insert( chain[ x ].articleOffset );
}
if ( mainArticles.empty() && alternateArticles.empty() )
{
finish();
return;
}
string result;
multimap< wstring, string >::const_iterator i;
for( i = mainArticles.begin(); i != mainArticles.end(); ++i )
result += i->second;
for( i = alternateArticles.begin(); i != alternateArticles.end(); ++i )
result += i->second;
Mutex::Lock _( dataMutex );
data.resize( result.size() );
memcpy( &data.front(), result.data(), result.size() );
hasAnyData = true;
finish();
}
sptr< Dictionary::DataRequest > DslDictionary::getArticle( wstring const & word,
vector< wstring > const & alts )
throw( std::exception )
{
return new DslArticleRequest( word, alts, *this );
}
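/// Reads the entire contents of the file 'n' into 'data'. Failures propagate
/// as File exceptions, which the callers below catch.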
void loadFromFile( string const & n, vector< char > & data )
{
File::Class f( n, "rb" );
f.seekEnd();
data.resize( f.tell() );
f.rewind();
f.read( &data.front(), data.size() );
}
//// DslDictionary::getResource()
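// A resource is looked for in three places, in this order: next to the
// dictionary file itself, then in a directory named after the dictionary file
// with a ".files" suffix, and finally inside the attached zip archive, if any.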
class DslResourceRequest;
class DslResourceRequestRunnable: public QRunnable
{
DslResourceRequest & r;
QSemaphore & hasExited;
public:
DslResourceRequestRunnable( DslResourceRequest & r_,
QSemaphore & hasExited_ ): r( r_ ),
hasExited( hasExited_ )
{}
~DslResourceRequestRunnable()
{
hasExited.release();
}
virtual void run();
};
class DslResourceRequest: public Dictionary::DataRequest
{
friend class DslResourceRequestRunnable;
Mutex & resourceZipMutex;
zip * resourceZip;
string dictionaryFileName, resourceName;
QAtomicInt isCancelled;
QSemaphore hasExited;
public:
DslResourceRequest( Mutex & resourceZipMutex_,
zip * resourceZip_,
string const & dictionaryFileName_,
string const & resourceName_ ):
resourceZipMutex( resourceZipMutex_ ),
resourceZip( resourceZip_ ),
dictionaryFileName( dictionaryFileName_ ),
resourceName( resourceName_ )
{
QThreadPool::globalInstance()->start(
new DslResourceRequestRunnable( *this, hasExited ) );
}
void run(); // Run from another thread by DslResourceRequestRunnable
virtual void cancel()
{
isCancelled.ref();
}
~DslResourceRequest()
{
isCancelled.ref();
hasExited.acquire();
}
};
void DslResourceRequestRunnable::run()
{
r.run();
}
void DslResourceRequest::run()
{
  // Some runnables sit in the thread pool long enough to be cancelled before
  // they even start
if ( isCancelled )
{
finish();
return;
}
string n =
FsEncoding::dirname( dictionaryFileName ) +
FsEncoding::separator() +
FsEncoding::encode( resourceName );
printf( "n is %s\n", n.c_str() );
try
{
try
{
Mutex::Lock _( dataMutex );
loadFromFile( n, data );
}
catch( File::exCantOpen & )
{
n = dictionaryFileName + ".files" +
FsEncoding::separator() +
FsEncoding::encode( resourceName );
try
{
Mutex::Lock _( dataMutex );
loadFromFile( n, data );
}
catch( File::exCantOpen & )
{
// Try reading from zip file
if ( resourceZip )
{
string fname = FsEncoding::encode( resourceName );
struct zip_stat st;
zip_file * zf;
zip_stat_init( &st );
Mutex::Lock _( resourceZipMutex );
int fileIndex;
if ( !isCancelled &&
( fileIndex = zip_name_locate( resourceZip, fname.c_str(), 0 ) ) != -1 &&
!zip_stat_index( resourceZip, fileIndex, 0, &st ) &&
( zf = zip_fopen_index( resourceZip, fileIndex, 0 ) ) )
{
int result;
{
Mutex::Lock _( dataMutex );
data.resize( st.size );
result = zip_fread( zf, &data.front(), data.size() );
}
zip_fclose( zf );
if ( result != (int)st.size )
throw; // Make it fail since we couldn't read the archive
}
else
throw;
}
else
throw;
}
}
if ( Filetype::isNameOfTiff( resourceName ) )
{
      // The article view generally can't render TIFF directly, so convert the
      // image to BMP in memory
dataMutex.lock();
QImage img = QImage::fromData( (unsigned char *) &data.front(),
data.size() );
dataMutex.unlock();
if ( !img.isNull() )
{
// Managed to load -- now store it back as BMP
QByteArray ba;
QBuffer buffer( &ba );
buffer.open( QIODevice::WriteOnly );
img.save( &buffer, "BMP" );
Mutex::Lock _( dataMutex );
data.resize( buffer.size() );
memcpy( &data.front(), buffer.data(), data.size() );
}
}
Mutex::Lock _( dataMutex );
hasAnyData = true;
}
catch( File::Ex & )
{
// No such resource -- we don't set the hasAnyData flag then
}
finish();
}
sptr< Dictionary::DataRequest > DslDictionary::getResource( string const & name )
throw( std::exception )
{
return new DslResourceRequest( resourceZipMutex, resourceZip,
getDictionaryFilenames()[ 0 ], name );
}
} // anonymous namespace
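/// Checks whether a file with the given name can be opened for reading; if so,
/// stores the name in copyTo and returns true.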
static bool tryPossibleName( string const & name, string & copyTo )
{
try
{
File::Class f( name, "rb" );
copyTo = name;
return true;
}
catch( ... )
{
return false;
}
}
#if 0
static void findCorrespondingFiles( string const & ifo,
string & idx, string & dict, string & syn,
bool needSyn )
{
string base( ifo, 0, ifo.size() - 3 );
if ( !(
tryPossibleName( base + "idx", idx ) ||
tryPossibleName( base + "idx.gz", idx ) ||
tryPossibleName( base + "idx.dz", idx ) ||
tryPossibleName( base + "IDX", idx ) ||
tryPossibleName( base + "IDX.GZ", idx ) ||
tryPossibleName( base + "IDX.DZ", idx )
) )
throw exNoIdxFile( ifo );
if ( !(
tryPossibleName( base + "dict", dict ) ||
tryPossibleName( base + "dict.dz", dict ) ||
tryPossibleName( base + "DICT", dict ) ||
tryPossibleName( base + "dict.DZ", dict )
) )
throw exNoDictFile( ifo );
if ( needSyn && !(
tryPossibleName( base + "syn", syn ) ||
tryPossibleName( base + "syn.gz", syn ) ||
tryPossibleName( base + "syn.dz", syn ) ||
tryPossibleName( base + "SYN", syn ) ||
tryPossibleName( base + "SYN.GZ", syn ) ||
tryPossibleName( base + "SYN.DZ", syn )
) )
throw exNoSynFile( ifo );
}
#endif
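/// Scans fileNames for .dsl/.dsl.dz dictionaries, rebuilds each dictionary's
/// index under indicesDir when it is missing or out of date, and returns the
/// constructed dictionary objects. A matching "_abrv.dsl" file, if present, is
/// indexed alongside so abbreviation expansions are available when rendering
/// articles.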
vector< sptr< Dictionary::Class > > makeDictionaries(
vector< string > const & fileNames,
string const & indicesDir,
Dictionary::Initializing & initializing )
throw( std::exception )
{
vector< sptr< Dictionary::Class > > dictionaries;
for( vector< string >::const_iterator i = fileNames.begin(); i != fileNames.end();
++i )
{
// Try .dsl and .dsl.dz suffixes
if ( ( i->size() < 4 ||
strcasecmp( i->c_str() + ( i->size() - 4 ), ".dsl" ) != 0 ) &&
( i->size() < 7 ||
strcasecmp( i->c_str() + ( i->size() - 7 ), ".dsl.dz" ) != 0 ) )
continue;
try
{
vector< string > dictFiles( 1, *i );
// Check if there is an 'abrv' file present
string baseName = ( (*i)[ i->size() - 4 ] == '.' ) ?
string( *i, 0, i->size() - 4 ) : string( *i, 0, i->size() - 7 );
string abrvFileName;
if ( tryPossibleName( baseName + "_abrv.dsl", abrvFileName ) ||
tryPossibleName( baseName + "_abrv.dsl.dz", abrvFileName ) ||
tryPossibleName( baseName + "_ABRV.DSL", abrvFileName ) ||
tryPossibleName( baseName + "_ABRV.DSL.DZ", abrvFileName ) ||
tryPossibleName( baseName + "_ABRV.DSL.dz", abrvFileName ) )
dictFiles.push_back( abrvFileName );
string dictId = Dictionary::makeDictionaryId( dictFiles );
string indexFile = indicesDir + dictId;
if ( Dictionary::needToRebuildIndex( dictFiles, indexFile ) ||
indexIsOldOrBad( indexFile ) )
{
DslScanner scanner( *i );
if ( scanner.getDictionaryName() == L"Abbrev" )
continue; // For now just skip abbreviations
// Building the index
initializing.indexingDictionary( Utf8::encode( scanner.getDictionaryName() ) );
printf( "Dictionary name: %ls\n", scanner.getDictionaryName().c_str() );
File::Class idx( indexFile, "wb" );
IdxHeader idxHeader;
memset( &idxHeader, 0, sizeof( idxHeader ) );
// We write a dummy header first. At the end of the process the header
// will be rewritten with the right values.
idx.write( idxHeader );
string dictionaryName = Utf8::encode( scanner.getDictionaryName() );
idx.write( (uint32_t) dictionaryName.size() );
idx.write( dictionaryName.data(), dictionaryName.size() );
idxHeader.dslEncoding = scanner.getEncoding();
IndexedWords indexedWords;
ChunkedStorage::Writer chunks( idx );
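          // Index layout: the IdxHeader comes first (rewritten at the end with
          // real values), followed by the dictionary name, then the chunked
          // storage holding the abbreviation table and one record per article
          // (its offset and size within the .dsl file), and finally the B-tree
          // built from all headword forms.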
// Read the abbreviations
if ( abrvFileName.size() )
{
try
{
DslScanner abrvScanner( abrvFileName );
map< string, string > abrv;
wstring curString;
size_t curOffset;
for( ; ; )
{
// Skip any whitespace
if ( !abrvScanner.readNextLine( curString, curOffset ) )
break;
if ( curString.empty() || iswblank( curString[ 0 ] ) )
continue;
string key = Utf8::encode( curString );
if ( !abrvScanner.readNextLine( curString, curOffset ) )
{
fprintf( stderr, "Warning: premature end of file %s\n", abrvFileName.c_str() );
break;
}
if ( curString.empty() || !iswblank( curString[ 0 ] ) )
{
fprintf( stderr, "Warning: malformed file %s\n", abrvFileName.c_str() );
break;
}
curString.erase( 0, curString.find_first_not_of( L" \t" ) );
abrv[ key ] = Utf8::encode( curString );
}
idxHeader.hasAbrv = 1;
idxHeader.abrvAddress = chunks.startNewBlock();
uint32_t sz = abrv.size();
chunks.addToBlock( &sz, sizeof( uint32_t ) );
for( map< string, string >::const_iterator i = abrv.begin();
i != abrv.end(); ++i )
{
printf( "%s:%s\n", i->first.c_str(), i->second.c_str() );
sz = i->first.size();
chunks.addToBlock( &sz, sizeof( uint32_t ) );
chunks.addToBlock( i->first.data(), sz );
sz = i->second.size();
chunks.addToBlock( &sz, sizeof( uint32_t ) );
chunks.addToBlock( i->second.data(), sz );
}
}
catch( std::exception & e )
{
fprintf( stderr, "Error reading abrv file %s: %s. Skipping it.\n",
abrvFileName.c_str(), e.what() );
}
}
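        // Now walk through the articles themselves. In a DSL file each article
        // begins with one or more headword lines starting at column zero; the
        // body lines that follow are indented with whitespace, e.g. (an
        // illustrative sketch, not from any real dictionary):
        //
        //   headword
        //   alternate headword
        //     [m1][trn]definition text[/trn][/m]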
bool hasString = false;
wstring curString;
size_t curOffset;
for( ; ; )
{
// Find the main headword
if ( !hasString && !scanner.readNextLine( curString, curOffset ) )
break; // Clean end of file
hasString = false;
// The line read should either consist of pure whitespace, or be a
// headword
if ( curString.empty() )
continue;
if ( iswblank( curString[ 0 ] ) )
{
// The first character is blank. Let's make sure that all other
// characters are blank, too.
for( size_t x = 1; x < curString.size(); ++x )
{
if ( !iswblank( curString[ x ] ) )
{
            fprintf( stderr, "Warning: garbage string in %s at offset 0x%lX\n",
                     i->c_str(), (unsigned long) curOffset );
break;
}
}
continue;
}
// Ok, got the headword
list< wstring > allEntryWords;
processUnsortedParts( curString, true );
expandOptionalParts( curString, allEntryWords );
uint32_t articleOffset = curOffset;
//printf( "Headword: %ls\n", curString.c_str() );
// More headwords may follow
for( ; ; )
{
if ( ! ( hasString = scanner.readNextLine( curString, curOffset ) ) )
{
fprintf( stderr, "Warning: premature end of file %s\n", i->c_str() );
break;
}
if ( curString.empty() || iswblank( curString[ 0 ] ) )
break; // No more headwords
printf( "Alt headword: %ls\n", curString.c_str() );
processUnsortedParts( curString, true );
expandTildes( curString, allEntryWords.front() );
expandOptionalParts( curString, allEntryWords );
}
if ( !hasString )
break;
// Insert new entry
uint32_t descOffset = chunks.startNewBlock();
chunks.addToBlock( &articleOffset, sizeof( articleOffset ) );
for( list< wstring >::iterator j = allEntryWords.begin();
j != allEntryWords.end(); ++j )
{
unescapeDsl( *j );
indexedWords.addWord( *j, descOffset );
}
// Skip the article's body
for( ; ; )
{
if ( ! ( hasString = scanner.readNextLine( curString, curOffset ) ) )
break;
if ( curString.size() && !iswblank( curString[ 0 ] ) )
break;
}
      // Now that we have read the first string after the article body, we can
      // use its offset to calculate the article's size. End of file works here
      // too.
uint32_t articleSize = ( curOffset - articleOffset );
chunks.addToBlock( &articleSize, sizeof( articleSize ) );
if ( !hasString )
break;
}
// Finish with the chunks
idxHeader.chunksOffset = chunks.finish();
// Build index
IndexInfo idxInfo = BtreeIndexing::buildIndex( indexedWords, idx );
idxHeader.indexBtreeMaxElements = idxInfo.btreeMaxElements;
idxHeader.indexRootOffset = idxInfo.rootOffset;
// That concludes it. Update the header.
idxHeader.signature = Signature;
idxHeader.formatVersion = CurrentFormatVersion;
idx.rewind();
idx.write( &idxHeader, sizeof( idxHeader ) );
}
dictionaries.push_back( new DslDictionary( dictId,
indexFile,
dictFiles ) );
}
catch( std::exception & e )
{
fprintf( stderr, "DSL dictionary reading failed: %s, error: %s\n",
i->c_str(), e.what() );
}
}
return dictionaries;
}
}