I2P Address: [http://git.idk.i2p]

Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • equincey/i2p.i2p
  • marek/i2p.i2p
  • kytv/i2p.i2p
  • agentoocat/i2p.i2p
  • aargh/i2p.i2p
  • Kalhintz/i2p.i2p
  • longyap/i2p.i2p
  • kelare/i2p.i2p
  • apsoyka/i2p.i2p
  • mesh/i2p.i2p
  • ashtod/i2p.i2p
  • y2kboy23/i2p.i2p
  • Lfrr/i2p.i2p
  • anonymousmaybe/i2p.i2p
  • obscuratus/i2p.i2p
  • zzz/i2p.i2p
  • lbt/i2p.i2p
  • 31337/i2p.i2p
  • DuncanIdaho/i2p.i2p
  • loveisgrief/i2p.i2p
  • i2p-hackers/i2p.i2p
  • thebland/i2p.i2p
  • elde/i2p.i2p
  • echelon/i2p.i2p
  • welshlyluvah1967/i2p.i2p
  • zlatinb/i2p.i2p
  • sadie/i2p.i2p
  • pVT0/i2p.i2p
  • idk/i2p.i2p
29 results
Show changes
Showing
with 6078 additions and 1211 deletions
......@@ -20,22 +20,28 @@
package org.klomp.snark;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.File;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.HashMap;
import org.klomp.snark.bencode.*;
import net.i2p.data.Base64;
import net.i2p.util.Log;
import gnu.getopt.Getopt;
import net.i2p.I2PAppContext;
import net.i2p.crypto.SHA1;
import net.i2p.data.DataHelper;
import net.i2p.util.Log;
import org.klomp.snark.bencode.BDecoder;
import org.klomp.snark.bencode.BEValue;
import org.klomp.snark.bencode.BEncoder;
import org.klomp.snark.bencode.InvalidBEncodingException;
/**
* Note: this class is buggy, as it doesn't propagate custom meta fields into the bencoded
......@@ -45,42 +51,140 @@ import net.i2p.crypto.SHA1;
*/
public class MetaInfo
{
private static final Log _log = new Log(MetaInfo.class);
private final Log _log = I2PAppContext.getGlobalContext().logManager().getLog(MetaInfo.class);
private final String announce;
private final byte[] info_hash;
private final String name;
private final String name_utf8;
private final List files;
private final List files_utf8;
private final List lengths;
private final List<List<String>> files;
private final List<List<String>> files_utf8;
private final List<String> attributes;
private final List<Long> lengths;
private final int piece_length;
private final byte[] piece_hashes;
private final long length;
private final Map infoMap;
private byte[] torrentdata;
private final int privateTorrent; // 0: not present; 1: = 1; -1: = 0
private final List<List<String>> announce_list;
private final String comment;
private final String created_by;
private final long creation_date;
private final List<String> url_list;
private Map<String, BEValue> infoMap;
private int infoBytesLength;
MetaInfo(String announce, String name, String name_utf8, List files, List lengths,
int piece_length, byte[] piece_hashes, long length)
/**
* Called by Storage when creating a new torrent from local data
*
* @param announce may be null
* @param files null for single-file torrent
* @param lengths null for single-file torrent
* @param announce_list may be null
* @param created_by may be null
* @param url_list may be null
* @param comment may be null
* @since public since 0.9.53, was package private
*/
public MetaInfo(String announce, String name, String name_utf8, List<List<String>> files, List<Long> lengths,
int piece_length, byte[] piece_hashes, long length, boolean privateTorrent,
List<List<String>> announce_list, String created_by, List<String> url_list, String comment)
{
this.announce = announce;
this.name = name;
this.name_utf8 = name_utf8;
this.files = files;
this.files = files == null ? null : Collections.unmodifiableList(files);
this.files_utf8 = null;
this.lengths = lengths;
this.lengths = lengths == null ? null : Collections.unmodifiableList(lengths);
this.piece_length = piece_length;
this.piece_hashes = piece_hashes;
this.length = length;
this.privateTorrent = privateTorrent ? 1 : 0;
this.announce_list = announce_list;
this.comment = comment;
this.created_by = created_by;
this.creation_date = I2PAppContext.getGlobalContext().clock().now();
this.url_list = url_list;
// TODO BEP 52 hybrid torrent with piece layers, meta version and file tree
this.attributes = null;
// TODO if we add a parameter for other keys
//if (other != null) {
// otherInfo = new HashMap(2);
// otherInfo.putAll(other);
//}
this.info_hash = calculateInfoHash();
infoMap = null;
//infoMap = null;
}
/**
* Preserves privateTorrent int value, for main()
*
* @since 0.9.62
*/
public MetaInfo(String announce, String name, String name_utf8, List<List<String>> files, List<Long> lengths,
int piece_length, byte[] piece_hashes, long length, int privateTorrent,
List<List<String>> announce_list, String created_by, List<String> url_list, String comment)
{
this.announce = announce;
this.name = name;
this.name_utf8 = name_utf8;
// wrap caller-supplied lists so internal state cannot be mutated externally
this.files = files == null ? null : Collections.unmodifiableList(files);
// UTF-8 path variants are never set for locally-created torrents
this.files_utf8 = null;
this.lengths = lengths == null ? null : Collections.unmodifiableList(lengths);
this.piece_length = piece_length;
this.piece_hashes = piece_hashes;
this.length = length;
// raw BEP 27 tri-state: 0 = key absent, 1 = private=1, -1 = private=0
this.privateTorrent = privateTorrent;
this.announce_list = announce_list;
this.comment = comment;
this.created_by = created_by;
// stamped in ms; serialized as seconds (divided by 1000) when bencoded
this.creation_date = I2PAppContext.getGlobalContext().clock().now();
this.url_list = url_list;
// per-file BEP 47 attributes are not supported for locally-created torrents
this.attributes = null;
// requires all fields above to be set, as it builds the info map from them
this.info_hash = calculateInfoHash();
}
/**
* Will not change infohash.
* Retains creation date of old MetaInfo if nonzero.
*
* @param new_announce may be null
* @param new_announce_list may be null
* @param new_comment may be null
* @param new_created_by may be null
* @param new_url_list may be null
* @since 0.9.64
*/
public MetaInfo(MetaInfo old, String new_announce, List<List<String>> new_announce_list, String new_comment,
String new_created_by, List<String> new_url_list)
{
// "Edit torrent" copy constructor: only top-level, non-info-dict fields
// are replaced, so the infohash (and thus swarm identity) is unchanged.
this.announce = new_announce;
this.info_hash = old.info_hash;
this.name = old.name;
this.name_utf8 = old.name_utf8;
this.files = old.files;
this.files_utf8 = old.files_utf8;
this.attributes = old.attributes;
this.lengths = old.lengths;
this.piece_length = old.piece_length;
this.piece_hashes = old.piece_hashes;
this.length = old.length;
this.privateTorrent = old.privateTorrent;
this.announce_list = new_announce_list;
this.comment = new_comment;
this.created_by = new_created_by;
// retain the original creation date when known, else stamp with now
this.creation_date = old.creation_date > 0 ? old.creation_date : I2PAppContext.getGlobalContext().clock().now();
this.url_list = new_url_list;
// reuse the cached info map and its encoded length so the bencoded
// info dictionary stays byte-identical to the original
this.infoMap = old.infoMap;
this.infoBytesLength = old.infoBytesLength;
}
/**
* Creates a new MetaInfo from the given InputStream. The
* InputStream must start with a correctly bencoded dictonary
* InputStream must start with a correctly bencoded dictionary
* describing the torrent.
* Caller must close the stream.
*/
public MetaInfo(InputStream in) throws IOException
{
......@@ -91,55 +195,159 @@ public class MetaInfo
* Creates a new MetaInfo from the given BDecoder. The BDecoder
* must have a complete dictionary describing the torrent.
*/
public MetaInfo(BDecoder be) throws IOException
private MetaInfo(BDecoder be) throws IOException
{
// Note that evaluation order matters here...
this(be.bdecodeMap().getMap());
byte[] origInfohash = be.get_special_map_digest();
// shouldn't ever happen
if (!DataHelper.eq(origInfohash, info_hash))
throw new InvalidBEncodingException("Infohash mismatch, please report");
}
/**
* Creates a new MetaInfo from a Map of BEValues and the SHA1 over
* the original bencoded info dictonary (this is a hack, we could
* the original bencoded info dictionary (this is a hack, we could
* reconstruct the bencoded stream and recalculate the hash). Will
* throw a InvalidBEncodingException if the given map does not
* contain a valid announce string or info dictonary.
* NOT throw a InvalidBEncodingException if the given map does not
* contain a valid announce string.
* WILL throw a InvalidBEncodingException if the given map does not
* contain a valid info dictionary.
*/
public MetaInfo(Map m) throws InvalidBEncodingException
public MetaInfo(Map<String, BEValue> m) throws InvalidBEncodingException
{
_log.debug("Creating a metaInfo: " + m, new Exception("source"));
BEValue val = (BEValue)m.get("announce");
if (val == null)
throw new InvalidBEncodingException("Missing announce string");
this.announce = val.getString();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Creating a metaInfo: " + m, new Exception("source"));
BEValue val = m.get("announce");
// Disabled check, we can get info from a magnet now
if (val == null) {
//throw new InvalidBEncodingException("Missing announce string");
this.announce = null;
} else {
this.announce = val.getString();
}
val = (BEValue)m.get("info");
// BEP 12
val = m.get("announce-list");
if (val == null) {
this.announce_list = null;
} else {
this.announce_list = new ArrayList<List<String>>();
List<BEValue> bl1 = val.getList();
for (BEValue bev : bl1) {
List<BEValue> bl2 = bev.getList();
List<String> sl2 = new ArrayList<String>();
for (BEValue bev2 : bl2) {
sl2.add(bev2.getString());
}
this.announce_list.add(sl2);
}
}
// BEP 19
val = m.get("url-list");
if (val == null) {
this.url_list = null;
} else {
List<String> urllist;
try {
List<BEValue> bl1 = val.getList();
urllist = new ArrayList<String>(bl1.size());
for (BEValue bev : bl1) {
urllist.add(bev.getString());
}
} catch (InvalidBEncodingException ibee) {
// BEP 19 says it's a list but the example there
// is for a single byte string, and we've seen this
// in the wild.
urllist = Collections.singletonList(val.getString());
}
this.url_list = urllist;
}
// misc. optional top-level stuff
val = m.get("comment");
String st = null;
if (val != null) {
try {
st = val.getString();
} catch (InvalidBEncodingException ibee) {}
}
this.comment = st;
val = m.get("created by");
st = null;
if (val != null) {
try {
st = val.getString();
} catch (InvalidBEncodingException ibee) {}
}
this.created_by = st;
val = m.get("creation date");
long time = 0;
if (val != null) {
try {
time = val.getLong() * 1000;
} catch (InvalidBEncodingException ibee) {}
}
this.creation_date = time;
val = m.get("info");
if (val == null)
throw new InvalidBEncodingException("Missing info map");
Map info = val.getMap();
infoMap = info;
Map<String, BEValue> info = val.getMap();
infoMap = Collections.unmodifiableMap(info);
val = (BEValue)info.get("name");
val = info.get("name");
if (val == null)
throw new InvalidBEncodingException("Missing name string");
name = val.getString();
// We could silently replace the '/', but that messes up the info hash, so just throw instead.
if (name.indexOf('/') >= 0)
throw new InvalidBEncodingException("Invalid name containing '/' " + name);
val = (BEValue)info.get("name.utf-8");
val = info.get("name.utf-8");
if (val != null)
name_utf8 = val.getString();
else
name_utf8 = null;
val = (BEValue)info.get("piece length");
// BEP 27
val = info.get("private");
if (val != null) {
Object o = val.getValue();
// Is it supposed to be a number or a string?
// i2psnark does it as a string. BEP 27 doesn't say.
// Transmission does numbers. So does libtorrent.
// We handle both as of 0.9.9.
// We switch to storing as number as of 0.9.14.
boolean privat = "1".equals(o) ||
((o instanceof Number) && ((Number) o).intValue() == 1);
privateTorrent = privat ? 1 : -1;
} else {
privateTorrent = 0;
}
val = info.get("piece length");
if (val == null)
throw new InvalidBEncodingException("Missing piece length number");
piece_length = val.getInt();
val = (BEValue)info.get("pieces");
if (val == null)
val = info.get("pieces");
if (val == null) {
// BEP 52
// We do the check here because a torrent file could be combined v1/v2,
// so a version 2 value isn't by itself fatal
val = info.get("meta version");
if (val != null) {
int version = val.getInt();
if (version != 1)
throw new InvalidBEncodingException("Version " + version + " torrent file not supported");
}
throw new InvalidBEncodingException("Missing piece bytes");
}
piece_hashes = val.getBytes();
val = (BEValue)info.get("length");
val = info.get("length");
if (val != null)
{
// Single file case.
......@@ -147,76 +355,157 @@ public class MetaInfo
files = null;
files_utf8 = null;
lengths = null;
attributes = null;
}
else
{
// Multi file case.
val = (BEValue)info.get("files");
val = info.get("files");
if (val == null)
throw new InvalidBEncodingException
("Missing length number and/or files list");
List list = val.getList();
List<BEValue> list = val.getList();
int size = list.size();
if (size == 0)
throw new InvalidBEncodingException("zero size files list");
files = new ArrayList(size);
files_utf8 = new ArrayList(size);
lengths = new ArrayList(size);
List<List<String>> m_files = new ArrayList<List<String>>(size);
List<List<String>> m_files_utf8 = null;
List<Long> m_lengths = new ArrayList<Long>(size);
List<String> m_attributes = null;
long l = 0;
for (int i = 0; i < list.size(); i++)
{
Map desc = ((BEValue)list.get(i)).getMap();
val = (BEValue)desc.get("length");
Map<String, BEValue> desc = list.get(i).getMap();
val = desc.get("length");
if (val == null)
throw new InvalidBEncodingException("Missing length number");
long len = val.getLong();
lengths.add(new Long(len));
if (len < 0)
throw new InvalidBEncodingException("Negative file length");
m_lengths.add(Long.valueOf(len));
// check for overflowing the long
long oldTotal = l;
l += len;
if (l < oldTotal)
throw new InvalidBEncodingException("Huge total length");
val = (BEValue)desc.get("path");
val = desc.get("path");
if (val == null)
throw new InvalidBEncodingException("Missing path list");
List path_list = val.getList();
List<BEValue> path_list = val.getList();
int path_length = path_list.size();
if (path_length == 0)
throw new InvalidBEncodingException("zero size file path list");
List file = new ArrayList(path_length);
Iterator it = path_list.iterator();
while (it.hasNext())
file.add(((BEValue)it.next()).getString());
List<String> file = new ArrayList<String>(path_length);
Iterator<BEValue> it = path_list.iterator();
while (it.hasNext()) {
String s = it.next().getString();
// We could throw an IBEE, but just silently replace instead.
if (s.indexOf('/') >= 0)
s = s.replace("/", "_");
file.add(s);
}
// quick dup check - case sensitive, etc. - Storage does a better job
for (int j = 0; j < i; j++) {
if (file.equals(m_files.get(j)))
throw new InvalidBEncodingException("Duplicate file path " + DataHelper.toString(file));
}
files.add(file);
m_files.add(Collections.unmodifiableList(file));
val = (BEValue)desc.get("path.utf-8");
val = desc.get("path.utf-8");
if (val != null) {
m_files_utf8 = new ArrayList<List<String>>(size);
path_list = val.getList();
path_length = path_list.size();
if (path_length > 0) {
file = new ArrayList(path_length);
file = new ArrayList<String>(path_length);
it = path_list.iterator();
while (it.hasNext())
file.add(((BEValue)it.next()).getString());
files_utf8.add(file);
file.add(it.next().getString());
m_files_utf8.add(Collections.unmodifiableList(file));
}
}
// BEP 47
val = desc.get("attr");
if (val != null) {
String s = val.getString();
if (m_attributes == null) {
m_attributes = new ArrayList<String>(size);
for (int j = 0; j < i; j++) {
m_attributes.add("");
}
m_attributes.add(s);
}
} else {
if (m_attributes != null)
m_attributes.add("");
}
}
files = Collections.unmodifiableList(m_files);
files_utf8 = m_files_utf8 != null ? Collections.unmodifiableList(m_files_utf8) : null;
lengths = Collections.unmodifiableList(m_lengths);
length = l;
attributes = m_attributes;
}
info_hash = calculateInfoHash();
}
/**
 * Efficiently extracts the torrent name and the 20-byte SHA1 hash of the
 * info dictionary from a torrent file, without building a full MetaInfo.
 * Caller must close the stream.
 *
 * @param infoHashOut 20-byte out parameter; receives the info hash
 * @return the torrent name
 * @throws InvalidBEncodingException if the info map or name is missing
 * @since 0.8.5
 */
public static String getNameAndInfoHash(InputStream in, byte[] infoHashOut) throws IOException {
    BDecoder decoder = new BDecoder(in);
    Map<String, BEValue> top = decoder.bdecodeMap().getMap();
    BEValue infoVal = top.get("info");
    if (infoVal == null)
        throw new InvalidBEncodingException("Missing info map");
    Map<String, BEValue> info = infoVal.getMap();
    BEValue nameVal = info.get("name");
    if (nameVal == null)
        throw new InvalidBEncodingException("Missing name");
    // the decoder computed the SHA1 over the raw info-dict bytes as it parsed
    byte[] digest = decoder.get_special_map_digest();
    System.arraycopy(digest, 0, infoHashOut, 0, 20);
    return nameVal.getString();
}
/**
* Returns the announce URL (tracker) of this torrent.
* May be absent, e.g. for metainfo obtained via a magnet link.
* @return may be null!
*/
public String getAnnounce()
{
return announce;
}
/**
* Returns the BEP 12 "announce-list": a list of tracker-URL tiers,
* each tier itself a list of URLs, or null if not present.
*
* @since 0.9.5
*/
public List<List<String>> getAnnounceList() {
return announce_list;
}
/**
* Returns the BEP 19 web seed ("url-list") URLs, or null if none.
*
* @since 0.9.48
*/
public List<String> getWebSeedURLs() {
return url_list;
}
/**
* Returns the original 20 byte SHA1 hash over the bencoded info map.
*/
......@@ -227,9 +516,12 @@ public class MetaInfo
}
/**
* Returns the piece hashes. Only used by storage so package local.
* Returns the piece hashes.
*
* @return not a copy, do not modify
* @since public since 0.9.53, was package private
*/
byte[] getPieceHashes()
public byte[] getPieceHashes()
{
return piece_hashes;
}
......@@ -244,28 +536,79 @@ public class MetaInfo
return name;
}
/**
* Is it a private torrent (BEP 27 "private" key set to 1)?
* @since 0.9
*/
public boolean isPrivate() {
return privateTorrent > 0;
}
/**
* Raw tri-state of the BEP 27 private flag, distinguishing an absent
* key from an explicit "private = 0".
*
* @return 0 (default), 1 (set to 1), -1 (set to 0)
* @since 0.9.62
*/
public int getPrivateTrackerStatus() {
return privateTorrent;
}
/**
* Returns a list of lists of file name hierarchies or null if it is
* a single name. It has the same size as the list returned by
* getLengths().
*/
public List getFiles()
public List<List<String>> getFiles()
{
// XXX - Immutable?
return files;
}
/**
 * Does the given file carry the BEP 47 padding attribute ('p')?
 *
 * @param filenum zero-based index into the file list
 * @since 0.9.48
 */
public boolean isPaddingFile(int filenum) {
    // attributes is null unless some file in this torrent declared an "attr" key
    return attributes != null && attributes.get(filenum).indexOf('p') >= 0;
}
/**
* Returns a list of Longs indication the size of the individual
* files, or null if it is a single file. It has the same size as
* the list returned by getFiles().
*/
public List getLengths()
public List<Long> getLengths()
{
// XXX - Immutable?
return lengths;
}
/**
* The torrent's top-level comment string or null.
* Not available for locally-created torrents unless supplied at creation.
* @since 0.9.7
*/
public String getComment() {
return this.comment;
}
/**
* The "created by" string or null.
* Not available for locally-created torrents unless supplied at creation.
* @since 0.9.7
*/
public String getCreatedBy() {
return this.created_by;
}
/**
* The creation date in milliseconds since the epoch, or zero if unknown.
* As of 0.9.19, available for locally-created torrents.
* @since 0.9.7
*/
public long getCreationDate() {
return this.creation_date;
}
/**
* Returns the number of pieces.
*/
......@@ -278,7 +621,7 @@ public class MetaInfo
* Return the length of a piece. All pieces are of equal length
* except for the last one (<code>getPieces()-1</code>).
*
* @exception IndexOutOfBoundsException when piece is equal to or
* @throws IndexOutOfBoundsException when piece is equal to or
* greater then the number of pieces in the torrent.
*/
public int getPieceLength(int piece)
......@@ -287,7 +630,7 @@ public class MetaInfo
if (piece >= 0 && piece < pieces -1)
return piece_length;
else if (piece == pieces -1)
return (int)(length - piece * piece_length);
return (int)(length - ((long)piece * piece_length));
else
throw new IndexOutOfBoundsException("no piece: " + piece);
}
......@@ -299,11 +642,13 @@ public class MetaInfo
*/
public boolean checkPiece(int piece, byte[] bs, int off, int length)
{
if (true)
//if (true)
return fast_checkPiece(piece, bs, off, length);
else
return orig_checkPiece(piece, bs, off, length);
//else
// return orig_checkPiece(piece, bs, off, length);
}
/****
private boolean orig_checkPiece(int piece, byte[] bs, int off, int length) {
// Check digest
MessageDigest sha1;
......@@ -323,29 +668,56 @@ public class MetaInfo
return false;
return true;
}
****/
private boolean fast_checkPiece(int piece, byte[] bs, int off, int length) {
SHA1 sha1 = new SHA1();
MessageDigest sha1 = SHA1.getInstance();
sha1.update(bs, off, length);
byte[] hash = sha1.digest();
for (int i = 0; i < 20; i++)
for (int i = 0; i < 20; i++) {
if (hash[i] != piece_hashes[20 * piece + i])
return false;
}
return true;
}
/**
 * Verify a partially-downloaded piece against its expected SHA1 hash.
 *
 * @return good (true if the computed hash matches the torrent's piece hash)
 * @since 0.9.1
 */
boolean checkPiece(PartialPiece pp) {
    int piece = pp.getPiece();
    byte[] computed;
    try {
        computed = pp.getHash();
    } catch (IOException ioe) {
        // A closed peer connection can surface here; swallow it so the
        // exception doesn't propagate through to Storage.putPiece()
        _log.warn("Error checking", ioe);
        return false;
    }
    // piece_hashes is a flat array of concatenated 20-byte SHA1 digests
    int base = 20 * piece;
    for (int i = 0; i < 20; i++) {
        if (computed[i] != piece_hashes[base + i])
            return false;
    }
    return true;
}
/**
 * Total size of all content in this torrent, in bytes.
 * Padding files, if any, are included in this total.
 */
public long getTotalLength()
{
    return this.length;
}
@Override
public String toString()
{
return "MetaInfo[info_hash='" + hexencode(info_hash)
return "MetaInfo[info_hash='" + I2PSnarkUtil.toHex(info_hash)
+ "', announce='" + announce
+ "', name='" + name
+ "', files=" + files
......@@ -356,106 +728,248 @@ public class MetaInfo
}
/**
* Encode a byte array as a hex encoded string.
* Creates a copy of this MetaInfo that shares everything except the
* announce URL.
* Drops any announce-list.
* Preserves infohash and info map, including any non-standard fields.
* @param announce may be null
*/
private static String hexencode(byte[] bs)
public MetaInfo reannounce(String announce) throws InvalidBEncodingException
{
StringBuffer sb = new StringBuffer(bs.length*2);
for (int i = 0; i < bs.length; i++)
{
int c = bs[i] & 0xFF;
if (c < 16)
sb.append('0');
sb.append(Integer.toHexString(c));
}
return sb.toString();
Map<String, BEValue> m = new HashMap<String, BEValue>();
if (announce != null)
m.put("announce", new BEValue(DataHelper.getUTF8(announce)));
Map<String, BEValue> info = createInfoMap();
m.put("info", new BEValue(info));
return new MetaInfo(m);
}
/**
* Creates a copy of this MetaInfo that shares everything except the
* announce URL.
* Called by servlet to save a new torrent file generated from local data
*/
public MetaInfo reannounce(String announce)
public synchronized byte[] getTorrentData()
{
return new MetaInfo(announce, name, name_utf8, files,
lengths, piece_length,
piece_hashes, length);
Map<String, Object> m = new HashMap<String, Object>();
if (announce != null)
m.put("announce", announce);
if (announce_list != null)
m.put("announce-list", announce_list);
// misc. optional top-level stuff
if (url_list != null)
m.put("url-list", url_list);
if (comment != null)
m.put("comment", comment);
if (created_by != null)
m.put("created by", created_by);
if (creation_date != 0)
m.put("creation date", creation_date / 1000);
Map<String, BEValue> info = createInfoMap();
m.put("info", info);
// don't save this locally, we should only do this once
return BEncoder.bencode(m);
}
public byte[] getTorrentData()
{
if (torrentdata == null)
{
Map m = new HashMap();
m.put("announce", announce);
Map info = createInfoMap();
m.put("info", info);
torrentdata = BEncoder.bencode(m);
}
return torrentdata;
/**
* Side effect: Caches infoBytesLength.
* @since 0.8.4
*/
public synchronized byte[] getInfoBytes() {
if (infoMap == null)
createInfoMap();
byte[] rv = BEncoder.bencode(infoMap);
infoBytesLength = rv.length;
return rv;
}
/**
* The size of getInfoBytes().
* Cached.
* @since 0.9.48
*/
public synchronized int getInfoBytesLength() {
if (infoBytesLength > 0)
return infoBytesLength;
return getInfoBytes().length;
}
private Map createInfoMap()
/** @return an unmodifiable view of the Map */
private Map<String, BEValue> createInfoMap()
{
Map info = new HashMap();
if (infoMap != null) {
info.putAll(infoMap);
return info;
}
info.put("name", name);
// If we loaded this metainfo from a file, we have the map, and we must use it
// or else we will lose any non-standard keys and corrupt the infohash.
if (infoMap != null)
return Collections.unmodifiableMap(infoMap);
// we should only get here if serving a magnet on a torrent we created
// or on edit torrent save
if (_log.shouldDebug())
_log.debug("Creating new infomap", new Exception());
// otherwise we must create it
Map<String, BEValue> info = new HashMap<String, BEValue>();
info.put("name", new BEValue(DataHelper.getUTF8(name)));
if (name_utf8 != null)
info.put("name.utf-8", name_utf8);
info.put("piece length", new Integer(piece_length));
info.put("pieces", piece_hashes);
info.put("name.utf-8", new BEValue(DataHelper.getUTF8(name_utf8)));
// BEP 27
if (privateTorrent != 0)
// switched to number in 0.9.14
//info.put("private", new BEValue(DataHelper.getUTF8("1")));
info.put("private", new BEValue(Integer.valueOf(privateTorrent > 0 ? 1 : 0)));
info.put("piece length", new BEValue(Integer.valueOf(piece_length)));
info.put("pieces", new BEValue(piece_hashes));
if (files == null)
info.put("length", new Long(length));
info.put("length", new BEValue(Long.valueOf(length)));
else
{
List l = new ArrayList();
List<BEValue> l = new ArrayList<BEValue>();
for (int i = 0; i < files.size(); i++)
{
Map file = new HashMap();
file.put("path", files.get(i));
if ( (files_utf8 != null) && (files_utf8.size() > i) )
file.put("path.utf-8", files_utf8.get(i));
file.put("length", lengths.get(i));
l.add(file);
Map<String, BEValue> file = new HashMap<String, BEValue>();
List<String> fi = files.get(i);
List<BEValue> befiles = new ArrayList<BEValue>(fi.size());
for (int j = 0; j < fi.size(); j++) {
befiles.add(new BEValue(DataHelper.getUTF8(fi.get(j))));
}
file.put("path", new BEValue(befiles));
if ( (files_utf8 != null) && (files_utf8.size() > i) ) {
List<String> fiu = files_utf8.get(i);
List<BEValue> beufiles = new ArrayList<BEValue>(fiu.size());
for (int j = 0; j < fiu.size(); j++) {
beufiles.add(new BEValue(DataHelper.getUTF8(fiu.get(j))));
}
file.put("path.utf-8", new BEValue(beufiles));
}
file.put("length", new BEValue(lengths.get(i)));
String attr = null;
if (attributes != null) {
attr = attributes.get(i);
if (attr.length() > 0)
file.put("attr", new BEValue(DataHelper.getASCII(attr)));
}
l.add(new BEValue(file));
}
info.put("files", l);
info.put("files", new BEValue(l));
}
return info;
// TODO BEP 52 meta version and file tree
// TODO if we add the ability for other keys in the first constructor
//if (otherInfo != null)
// info.putAll(otherInfo);
infoMap = info;
return Collections.unmodifiableMap(infoMap);
}
private byte[] calculateInfoHash()
{
Map info = createInfoMap();
StringBuffer buf = new StringBuffer(128);
buf.append("info: ");
for (Iterator iter = info.keySet().iterator(); iter.hasNext(); ) {
String key = (String)iter.next();
Object val = info.get(key);
buf.append(key).append('=');
if (val instanceof byte[])
buf.append(Base64.encode((byte[])val, true));
else
Map<String, BEValue> info = createInfoMap();
if (_log.shouldLog(Log.DEBUG)) {
StringBuilder buf = new StringBuilder(128);
buf.append("info: ");
for (Map.Entry<String, BEValue> entry : info.entrySet()) {
String key = entry.getKey();
Object val = entry.getValue();
buf.append(key).append('=');
buf.append(val.toString());
}
_log.debug(buf.toString());
}
_log.debug(buf.toString());
byte[] infoBytes = BEncoder.bencode(info);
//_log.debug("info bencoded: [" + Base64.encode(infoBytes, true) + "]");
try
{
MessageDigest digest = MessageDigest.getInstance("SHA");
MessageDigest digest = SHA1.getInstance();
byte hash[] = digest.digest(infoBytes);
_log.debug("info hash: [" + net.i2p.data.Base64.encode(hash) + "]");
if (_log.shouldLog(Log.DEBUG))
_log.debug("info hash: " + I2PSnarkUtil.toHex(hash));
return hash;
}
/** @since 0.8.5 */
public static void main(String[] args) {
boolean error = false;
String created_by = null;
String announce = null;
List<String> url_list = null;
String comment = null;
Getopt g = new Getopt("MetaInfo", args, "a:c:m:w:");
try {
int c;
while ((c = g.getopt()) != -1) {
switch (c) {
case 'a':
announce = g.getOptarg();
break;
case 'c':
created_by = g.getOptarg();
break;
case 'm':
comment = g.getOptarg();
break;
case 'w':
if (url_list == null)
url_list = new ArrayList<String>();
url_list.add(g.getOptarg());
break;
case '?':
case ':':
default:
error = true;
break;
} // switch
} // while
} catch (RuntimeException e) {
e.printStackTrace();
error = true;
}
catch(NoSuchAlgorithmException nsa)
{
throw new InternalError(nsa.toString());
if (error || args.length - g.getOptind() <= 0) {
System.err.println("Usage: MetaInfo [-a announceURL] [-c created-by] [-m comment] [-w webseed-url]* file.torrent [file2.torrent...]");
System.exit(1);
}
for (int i = g.getOptind(); i < args.length; i++) {
InputStream in = null;
java.io.OutputStream out = null;
try {
in = new FileInputStream(args[i]);
MetaInfo meta = new MetaInfo(in);
System.out.println(args[i] +
"\nInfoHash: " + I2PSnarkUtil.toHex(meta.getInfoHash()) +
"\nAnnounce: " + meta.getAnnounce() +
"\nWebSeed URLs: " + meta.getWebSeedURLs() +
"\nCreated By: " + meta.getCreatedBy() +
"\nComment: " + meta.getComment());
if (created_by != null || announce != null || url_list != null || comment != null) {
String cb = created_by != null ? created_by : meta.getCreatedBy();
String an = announce != null ? announce : meta.getAnnounce();
String cm = comment != null ? comment : meta.getComment();
List<String> urls = url_list != null ? url_list : meta.getWebSeedURLs();
MetaInfo meta2 = new MetaInfo(meta, an, meta.getAnnounceList(), cm, cb, urls);
java.io.File from = new java.io.File(args[i]);
java.io.File to = new java.io.File(args[i] + ".bak");
if (net.i2p.util.FileUtil.copy(from, to, true, false)) {
out = new java.io.FileOutputStream(from);
out.write(meta2.getTorrentData());
out.close();
System.out.println("Modified " + from + " and backed up old file to " + to);
System.out.println(args[i] +
"\nInfoHash: " + I2PSnarkUtil.toHex(meta2.getInfoHash()) +
"\nAnnounce: " + meta2.getAnnounce() +
"\nWebSeed URLs: " + meta2.getWebSeedURLs() +
"\nCreated By: " + meta2.getCreatedBy() +
"\nComment: " + meta2.getComment());
} else {
System.out.println("Failed backup of " + from + " to " + to);
}
}
} catch (IOException ioe) {
System.err.println("Error in file " + args[i] + ": " + ioe);
} finally {
try { if (in != null) in.close(); } catch (IOException ioe) {}
try { if (out != null) out.close(); } catch (IOException ioe) {}
}
}
}
}
package org.klomp.snark;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.security.MessageDigest;
import net.i2p.I2PAppContext;
import net.i2p.crypto.SHA1;
import net.i2p.data.ByteArray;
import net.i2p.util.ByteCache;
import net.i2p.util.Log;
import net.i2p.util.SecureFile;
/**
* Store the received data either on the heap or in a temp file.
* The third option, to write chunks directly to the destination file,
* is unimplemented.
*
* This is the class passed from PeerCoordinator to PeerState so
* PeerState may start requests.
*
* It is also passed from PeerState to PeerCoordinator when
* a piece is not completely downloaded, for example
* when the Peer disconnects or chokes.
*
* New objects for the same piece are created during the end game -
* this object should not be shared among multiple peers.
*
* @since 0.8.2
*/
class PartialPiece implements Comparable<PartialPiece> {

    /** kept (not just the piece number) so compareTo() can use the Piece ordering */
    private final Piece piece;
    /** heap storage for the piece data; null when a temp file is used instead */
    private final byte[] bs;
    /** next offset to hand out via getRequest(); advanced past contiguously-downloaded chunks */
    private int off;
    //private final long createdTime;
    /** created lazily in createTemp(); only used when bs == null */
    private File tempfile;
    private RandomAccessFile raf;
    /** total length of the piece in bytes */
    private final int pclen;
    private final File tempDir;
    /** one bit per PARTSIZE chunk; a set bit means that chunk has been received */
    private final BitField bitfield;

    private static final int BUFSIZE = PeerState.PARTSIZE;
    private static final ByteCache _cache = ByteCache.getInstance(16, BUFSIZE);

    // Any bigger than this, use temp file instead of heap
    private static final int MAX_IN_MEM = 128 * 1024;
    // May be reduced on OOM
    private static int _max_in_mem = MAX_IN_MEM;

    /**
     * Used by PeerCoordinator.
     * Creates a new PartialPiece, with no chunks yet downloaded.
     * Allocates the data storage area, either on the heap or in the
     * temp directory, depending on size.
     *
     * @param piece Piece number requested.
     * @param len must be equal to the piece length
     * @param tempDir where the backing temp file goes when len is too big for the heap
     */
    public PartialPiece (Piece piece, int len, File tempDir) {
        this.piece = piece;
        this.pclen = len;
        //this.createdTime = 0;
        this.tempDir = tempDir;
        bitfield = new BitField((len + PeerState.PARTSIZE - 1) / PeerState.PARTSIZE);

        // temps for finals
        byte[] tbs = null;
        try {
            // Compare against _max_in_mem (not the MAX_IN_MEM constant) so that a
            // previous OOM-triggered reduction actually takes effect.
            if (len <= _max_in_mem) {
                try {
                    tbs = new byte[len];
                    return;
                } catch (OutOfMemoryError oom) {
                    if (_max_in_mem > PeerState.PARTSIZE)
                        _max_in_mem /= 2;
                    Log log = I2PAppContext.getGlobalContext().logManager().getLog(PartialPiece.class);
                    log.logAlways(Log.WARN, "OOM creating new partial piece");
                    // fall through to use temp file
                }
            }
            // delay creating temp file until required in read()
        } finally {
            // finals
            this.bs = tbs;
        }
    }

    /**
     * Creates the backing temp file and opens it read/write.
     * Caller must synchronize
     *
     * @since 0.9.1
     */
    private void createTemp() throws IOException {
        //tfile = SecureFile.createTempFile("piece", null, tempDir);
        // debug
        tempfile = SecureFile.createTempFile("piece_" + piece.getId() + '_', null, tempDir);
        raf = new RandomAccessFile(tempfile, "rw");
    }

    /**
     * Convert this PartialPiece to a request for the next chunk.
     * Used by PeerState only.
     * Advances the internal offset past any already-downloaded chunks.
     *
     * @return null if complete
     */
    public synchronized Request getRequest() {
        int chunk = off / PeerState.PARTSIZE;
        int sz = bitfield.size();
        for (int i = chunk; i < sz; i++) {
            if (!bitfield.get(i))
                return new Request(this, off, Math.min(pclen - off, PeerState.PARTSIZE));
            // chunk i already downloaded; skip ahead
            if (i == sz - 1)
                off = pclen;
            else
                off += PeerState.PARTSIZE;
        }
        return null;
    }

    /** piece number */
    public int getPiece() {
        return this.piece.getId();
    }

    /**
     * Total piece length in bytes.
     * @since 0.9.1
     */
    public int getLength() {
        return this.pclen;
    }

    /**
     * Have all chunks been downloaded?
     * @since 0.9.62
     */
    public synchronized boolean isComplete() {
        return bitfield.complete();
    }

    /**
     * Have any chunks been downloaded?
     *
     * @since 0.9.63
     */
    public synchronized boolean hasData() {
        return bitfield.count() > 0;
    }

    /**
     * Has this chunk been downloaded?
     *
     * @since 0.9.63
     */
    public synchronized boolean hasChunk(int chunk) {
        return bitfield.get(chunk);
    }

    /**
     * How many bytes are good - as set by read().
     * As of 0.9.63, accurately counts good bytes after "holes".
     */
    public synchronized int getDownloaded() {
        if (bitfield.complete())
            return pclen;
        int sz = bitfield.size();
        int rv = bitfield.count() * PeerState.PARTSIZE;
        int rem = pclen % PeerState.PARTSIZE;
        // The final chunk is shorter than PARTSIZE; if it is among the downloaded
        // chunks, correct the overcount. Note this must test the LAST bit
        // (size() - 1), not bit count() - 1, which is an arbitrary low chunk and
        // would be negative when nothing is downloaded yet.
        if (rem != 0 && bitfield.get(sz - 1))
            rv -= PeerState.PARTSIZE - rem;
        return rv;
    }

    /**
     * Piece must be complete.
     * The SHA1 hash of the completely read data.
     *
     * @throws IOException if the temp file is missing or truncated
     * @since 0.9.1
     */
    public byte[] getHash() throws IOException {
        MessageDigest sha1 = SHA1.getInstance();
        if (bs != null) {
            sha1.update(bs);
        } else {
            int read = 0;
            int buflen = Math.min(pclen, BUFSIZE);
            ByteArray ba;
            byte[] buf;
            if (buflen == BUFSIZE) {
                ba = _cache.acquire();
                buf = ba.getData();
            } else {
                ba = null;
                buf = new byte[buflen];
            }
            try {
                synchronized (this) {
                    if (raf == null)
                        throw new IOException("No temp file for " + this);
                    raf.seek(0);
                    while (read < pclen) {
                        int rd = raf.read(buf, 0, Math.min(buf.length, pclen - read));
                        if (rd < 0)
                            break;
                        read += rd;
                        sha1.update(buf, 0, rd);
                    }
                }
            } finally {
                // release even if the read above throws
                if (ba != null)
                    _cache.release(ba, false);
            }
            if (read < pclen)
                throw new IOException("Truncated temp file for " + this);
        }
        return sha1.digest();
    }

    /**
     * Blocking.
     * If offset matches the previous downloaded amount
     * (as set by a previous call to read() or setDownloaded()),
     * the downloaded amount will be incremented by len.
     *
     * @param din source of the chunk data
     * @param offset byte offset within the piece, must be a multiple of PARTSIZE
     * @param len number of bytes to read
     * @param bwl notified incrementally as bytes arrive
     * @throws IOException on EOF, bad offset, or temp file failure
     * @since 0.9.1
     */
    public void read(DataInputStream din, int offset, int len, BandwidthListener bwl) throws IOException {
        if (offset % PeerState.PARTSIZE != 0)
            throw new IOException("Bad offset " + offset);
        int chunk = offset / PeerState.PARTSIZE;
        // We read the data before checking if we have the chunk,
        // because otherwise we'd have to break the peer connection
        if (bs != null) {
            // Don't use readFully() so we may update the BandwidthListener as we go
            //in.readFully(bs, offset, len);
            int offs = offset;
            int toRead = len;
            while (toRead > 0) {
                int numRead = din.read(bs, offs, toRead);
                if (numRead < 0)
                    throw new EOFException();
                offs += numRead;
                toRead -= numRead;
                bwl.downloaded(numRead);
            }
            synchronized (this) {
                if (bitfield.get(chunk)) {
                    warn("Already have chunk " + chunk + " on " + this);
                } else {
                    bitfield.set(chunk);
                    if (this.off == offset) {
                        this.off += len;
                        // if this filled in a hole, advance off
                        int sz = bitfield.size();
                        for (int i = chunk + 1; i < sz; i++) {
                            if (!bitfield.get(i))
                                break;
                            warn("Hole filled in before chunk " + i + " on " + this + ' ' + bitfield);
                            if (i == sz - 1)
                                off = pclen;
                            else
                                off += PeerState.PARTSIZE;
                        }
                    } else {
                        warn("Out of order chunk " + chunk + " on " + this + ' ' + bitfield);
                    }
                }
            }
        } else {
            // read in fully before synching on raf
            ByteArray ba;
            byte[] tmp;
            if (len == BUFSIZE) {
                ba = _cache.acquire();
                tmp = ba.getData();
            } else {
                ba = null;
                tmp = new byte[len];
            }
            try {
                // Don't use readFully() so we may update the BandwidthListener as we go
                //din.readFully(tmp);
                int offs = 0;
                int toRead = len;
                while (toRead > 0) {
                    int numRead = din.read(tmp, offs, toRead);
                    if (numRead < 0)
                        throw new EOFException();
                    offs += numRead;
                    toRead -= numRead;
                    bwl.downloaded(numRead);
                }
                synchronized (this) {
                    if (bitfield.get(chunk)) {
                        warn("Already have chunk " + chunk + " on " + this);
                    } else {
                        if (raf == null)
                            createTemp();
                        raf.seek(offset);
                        raf.write(tmp);
                        bitfield.set(chunk);
                        if (this.off == offset) {
                            this.off += len;
                            // if this filled in a hole, advance off
                            int sz = bitfield.size();
                            for (int i = chunk + 1; i < sz; i++) {
                                if (!bitfield.get(i))
                                    break;
                                warn("Hole filled in before chunk " + i + " on " + this + ' ' + bitfield);
                                if (i == sz - 1)
                                    off = pclen;
                                else
                                    off += PeerState.PARTSIZE;
                            }
                        } else {
                            warn("Out of order chunk " + chunk + " on " + this + ' ' + bitfield);
                        }
                    }
                }
            } finally {
                // release even if the read/write above throws
                if (ba != null)
                    _cache.release(ba, false);
            }
        }
    }

    /**
     * Piece must be complete.
     * Caller must synchronize on out and seek to starting point.
     * Caller must call release() when done with the whole piece.
     *
     * @param out stream to write to
     * @param offset offset in the piece
     * @param len length to write
     * @throws IOException if the temp file is missing or the write fails
     * @since 0.9.1
     */
    public void write(DataOutput out, int offset, int len) throws IOException {
        if (bs != null) {
            out.write(bs, offset, len);
        } else {
            int read = 0;
            int buflen = Math.min(len, BUFSIZE);
            ByteArray ba;
            byte[] buf;
            if (buflen == BUFSIZE) {
                ba = _cache.acquire();
                buf = ba.getData();
            } else {
                ba = null;
                buf = new byte[buflen];
            }
            try {
                synchronized (this) {
                    if (raf == null)
                        throw new IOException("No temp file for " + this);
                    raf.seek(offset);
                    while (read < len) {
                        int rd = Math.min(buf.length, len - read);
                        raf.readFully(buf, 0, rd);
                        read += rd;
                        out.write(buf, 0, rd);
                    }
                }
            } finally {
                // release even if the read/write above throws
                if (ba != null)
                    _cache.release(ba, false);
            }
        }
    }

    /**
     * Release all resources: closes and deletes the temp file if one was created.
     *
     * @since 0.9.1
     */
    public void release() {
        if (bs == null) {
            synchronized (this) {
                if (raf != null) {
                    locked_release();
                    raf = null;
                }
            }
            //if (raf != null)
            //    I2PAppContext.getGlobalContext().logManager().getLog(PartialPiece.class).warn("Released " + tempfile);
        }
    }

    /**
     * Close and delete the temp file.
     * Caller must synchronize
     *
     * @since 0.9.1
     */
    private void locked_release() {
        try {
            raf.close();
        } catch (IOException ioe) {
            // best effort; file is deleted below regardless
        }
        tempfile.delete();
    }

    /*
     * Highest priority first,
     * then rarest first,
     * then highest downloaded first
     */
    public int compareTo(PartialPiece opp) {
        int d = this.piece.compareTo(opp.piece);
        if (d != 0)
            return d;
        // reverse: more-downloaded sorts first
        return Integer.compare(opp.getDownloaded(), getDownloaded());
    }

    @Override
    public int hashCode() {
        return piece.getId() * 7777;
    }

    /**
     * Make this simple so PeerCoordinator can keep a List.
     * Warning - compares piece number only!
     */
    @Override
    public boolean equals(Object o) {
        if (o instanceof PartialPiece) {
            PartialPiece pp = (PartialPiece)o;
            return pp.piece.getId() == this.piece.getId();
        }
        return false;
    }

    @Override
    public String toString() {
        return "Partial(" + piece.getId() + ',' + off + ',' + getDownloaded() + ',' + pclen + ')';
    }

    /**
     * Log a warning via the global log manager.
     * @since 0.9.62
     */
    public static void warn(String s) {
        I2PAppContext.getGlobalContext().logManager().getLog(PartialPiece.class).warn(s);
    }
}
......@@ -20,75 +20,136 @@
package org.klomp.snark;
import java.io.*;
import java.net.*;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import org.klomp.snark.bencode.*;
import net.i2p.I2PAppContext;
import net.i2p.client.streaming.I2PSocket;
import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
import net.i2p.util.Log;
public class Peer implements Comparable
import org.klomp.snark.bencode.BEValue;
import org.klomp.snark.bencode.InvalidBEncodingException;
public class Peer implements Comparable<Peer>, BandwidthListener
{
private Log _log = new Log(Peer.class);
protected final Log _log = I2PAppContext.getGlobalContext().logManager().getLog(getClass());
// Identifying property, the peer id of the other side.
private final PeerID peerID;
private final byte[] my_id;
final MetaInfo metainfo;
private final byte[] infohash;
/** will start out null in magnet mode */
protected MetaInfo metainfo;
private Map<String, BEValue> handshakeMap;
// The data in/output streams set during the handshake and used by
// the actual connections.
private DataInputStream din;
private DataOutputStream dout;
// Keeps state for in/out connections. Non-null when the handshake
// was successful, the connection setup and runs
PeerState state;
/** running counters */
private final AtomicLong downloaded = new AtomicLong();
private final AtomicLong uploaded = new AtomicLong();
/** `
* Keeps state for in/out connections. Non-null when the handshake
* was successful, the connection setup and runs.
* Do not access directly. All actions should be through Peer methods.
*/
volatile PeerState state;
/** shared across all peers on this torrent */
MagnetState magnetState;
private I2PSocket sock;
private boolean deregister = true;
private static long __id;
private long _id;
private static final AtomicLong __id = new AtomicLong();
private final long _id;
private final AtomicBoolean _disconnected = new AtomicBoolean();
final static long CHECK_PERIOD = PeerCoordinator.CHECK_PERIOD;
final static int RATE_DEPTH = PeerCoordinator.RATE_DEPTH; // make following arrays RATE_DEPTH long
private final long uploaded_old[] = {-1,-1,-1};
private final long downloaded_old[] = {-1,-1,-1};
private static final byte[] HANDSHAKE = DataHelper.getASCII("BitTorrent protocol");
// See BEP 4 for definitions
// bytes per bt spec: 0011223344556677
private static final long OPTION_EXTENSION = 0x0000000000100000l;
private static final long OPTION_FAST = 0x0000000000000004l;
//private static final long OPTION_DHT = 0x0000000000000001l;
//private static final long OPTION_AZMP = 0x1000000000000000l;
// hybrid support TODO
private static final long OPTION_V2 = 0x0000000000000010L;
private long options;
private final boolean _isIncoming;
private int _totalCommentsSent;
private int _maxPipeline = PeerState.MIN_PIPELINE;
private long connected;
private long pexLastSent;
/**
* Outgoing connection.
* Creates a disconnected peer given a PeerID, your own id and the
* relevant MetaInfo.
* @param metainfo null if in magnet mode
*/
public Peer(PeerID peerID, byte[] my_id, MetaInfo metainfo)
throws IOException
public Peer(PeerID peerID, byte[] my_id, byte[] infohash, MetaInfo metainfo)
{
this.peerID = peerID;
this.my_id = my_id;
this.infohash = infohash;
this.metainfo = metainfo;
_id = ++__id;
//_log.debug("Creating a new peer with " + peerID.getAddress().calculateHash().toBase64(), new Exception("creating"));
_id = __id.incrementAndGet();
_isIncoming = false;
//_log.debug("Creating a new peer with " + peerID.toString(), new Exception("creating"));
}
/**
* Incoming connection.
* Creates a unconnected peer from the input and output stream got
* from the socket. Note that the complete handshake (which can take
* some time or block indefinitely) is done in the calling Thread to
* get the remote peer id. To completely start the connection call
* the connect() method.
*
* @exception IOException when an error occurred during the handshake.
* @param metainfo null if in magnet mode
* @throws IOException when an error occurred during the handshake.
*/
public Peer(final I2PSocket sock, InputStream in, OutputStream out, byte[] my_id, MetaInfo metainfo)
public Peer(final I2PSocket sock, InputStream in, OutputStream out, byte[] my_id, byte[] infohash, MetaInfo metainfo)
throws IOException
{
this.my_id = my_id;
this.infohash = infohash;
this.metainfo = metainfo;
this.sock = sock;
byte[] id = handshake(in, out);
this.peerID = new PeerID(id, sock.getPeerDestination());
_id = ++__id;
_log.debug("Creating a new peer with " + peerID.getAddress().calculateHash().toBase64(), new Exception("creating " + _id));
_id = __id.incrementAndGet();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Creating a new peer " + peerID.toString(), new Exception("creating " + _id));
_isIncoming = true;
}
/**
* Is this an incoming connection?
* For RPC
* @since 0.9.30
*/
public boolean isIncoming() {
return _isIncoming;
}
/**
......@@ -102,6 +163,7 @@ public class Peer implements Comparable
/**
* Returns the String representation of the peerID.
*/
@Override
public String toString()
{
if (peerID != null)
......@@ -110,18 +172,33 @@ public class Peer implements Comparable
return "[unknown id] " + _id;
}
/**
* @return socket debug string (for debug printing)
*/
public String getSocket()
{
if (state != null) {
String r = state.getRequests();
if (r != null)
return sock.toString() + "<br><b>Requests:</b> <span class=\"debugRequests\">" + r + "</span>";
}
return sock.toString();
}
/**
* The hash code of a Peer is the hash code of the peerID.
*/
@Override
public int hashCode()
{
return peerID.hashCode() ^ (2 << _id);
return peerID.hashCode() ^ (7777 * (int)_id);
}
/**
* Two Peers are equal when they have the same PeerID.
* All other properties are ignored.
*/
@Override
public boolean equals(Object o)
{
if (o instanceof Peer)
......@@ -135,10 +212,11 @@ public class Peer implements Comparable
/**
* Compares the PeerIDs.
* @deprecated unused?
*/
public int compareTo(Object o)
@Deprecated
public int compareTo(Peer p)
{
Peer p = (Peer)o;
int rv = peerID.compareTo(p.peerID);
if (rv == 0) {
if (_id > p._id) return 1;
......@@ -160,62 +238,83 @@ public class Peer implements Comparable
*
* If the given BitField is non-null it is send to the peer as first
* message.
*
* @param uploadOnly if we are complete with skipped files, i.e. a partial seed
*/
public void runConnection(PeerListener listener, BitField bitfield)
{
public void runConnection(I2PSnarkUtil util, PeerListener listener, BandwidthListener bwl, BitField bitfield,
MagnetState mState, boolean uploadOnly) {
if (state != null)
throw new IllegalStateException("Peer already started");
_log.debug("Running connection to " + peerID.getAddress().calculateHash().toBase64(), new Exception("connecting"));
if (_log.shouldLog(Log.DEBUG))
_log.debug("Running connection to " + peerID.toString(), new Exception("connecting"));
try
{
// Do we need to handshake?
if (din == null)
{
sock = I2PSnarkUtil.instance().connect(peerID);
_log.debug("Connected to " + peerID + ": " + sock);
// Outgoing connection
sock = util.connect(peerID);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Connected to " + peerID + ": " + sock);
if ((sock == null) || (sock.isClosed())) {
throw new IOException("Unable to reach " + peerID);
}
InputStream in = sock.getInputStream();
OutputStream out = sock.getOutputStream(); //new BufferedOutputStream(sock.getOutputStream());
if (true) {
// buffered output streams are internally synchronized, so we can't get through to the underlying
// I2PSocket's MessageOutputStream to close() it if we are blocking on a write(...). Oh, and the
// buffer is unnecessary anyway, as unbuffered access lets the streaming lib do the 'right thing'.
//out = new BufferedOutputStream(out);
in = new BufferedInputStream(sock.getInputStream());
}
//BufferedInputStream bis
// = new BufferedInputStream(sock.getInputStream());
//BufferedOutputStream bos
// = new BufferedOutputStream(sock.getOutputStream());
byte [] id = handshake(in, out); //handshake(bis, bos);
OutputStream out = sock.getOutputStream();
byte [] id = handshake(in, out);
byte [] expected_id = peerID.getID();
if (!Arrays.equals(expected_id, id))
throw new IOException("Unexpected peerID '"
if (expected_id == null) {
peerID.setID(id);
} else if (Arrays.equals(expected_id, id)) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Handshake got matching IDs with " + toString());
} else {
throw new IOException("Unexpected peerID '"
+ PeerID.idencode(id)
+ "' expected '"
+ PeerID.idencode(expected_id) + "'");
_log.debug("Handshake got matching IDs with " + toString());
}
} else {
_log.debug("Already have din [" + sock + "] with " + toString());
// Incoming connection
if (_log.shouldLog(Log.DEBUG))
_log.debug("Already have din [" + sock + "] with " + toString());
}
// bad idea?
if (metainfo == null && (options & OPTION_EXTENSION) == 0) {
if (_log.shouldLog(Log.INFO))
_log.info("Peer does not support extensions and we need metainfo, dropping");
throw new IOException("Peer does not support extensions and we need metainfo, dropping");
}
PeerConnectionIn in = new PeerConnectionIn(this, din);
PeerConnectionOut out = new PeerConnectionOut(this, dout);
PeerState s = new PeerState(this, listener, metainfo, in, out);
PeerState s = new PeerState(this, listener, bwl, metainfo, in, out);
if ((options & OPTION_EXTENSION) != 0) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peer supports extensions, sending reply message");
int metasize = metainfo != null ? metainfo.getInfoBytesLength() : -1;
boolean pexAndMetadata = metainfo == null || !metainfo.isPrivate();
boolean dht = util.getDHT() != null;
boolean comment = util.utCommentsEnabled();
out.sendExtension(0, ExtensionHandler.getHandshake(metasize, pexAndMetadata, dht, uploadOnly, comment));
}
// Send our bitmap
if (bitfield != null)
s.out.sendBitfield(bitfield);
// We are up and running!
state = s;
magnetState = mState;
connected = util.getContext().clock().now();
listener.connected(this);
_log.debug("Start running the reader with " + toString());
// Use this thread for running the incomming connection.
if (_log.shouldLog(Log.DEBUG))
_log.debug("Start running the reader with " + toString());
// Use this thread for running the incoming connection.
// The outgoing connection creates its own Thread.
out.startup();
Thread.currentThread().setName("Snark reader from " + peerID);
......@@ -245,56 +344,141 @@ public class Peer implements Comparable
* Sets DataIn/OutputStreams, does the handshake and returns the id
* reported by the other side.
*/
private byte[] handshake(InputStream in, OutputStream out) //BufferedInputStream bis, BufferedOutputStream bos)
private byte[] handshake(InputStream in, OutputStream out)
throws IOException
{
din = new DataInputStream(in);
dout = new DataOutputStream(out);
// Handshake write - header
dout.write(19);
dout.write("BitTorrent protocol".getBytes("UTF-8"));
// Handshake write - zeros
byte[] zeros = new byte[8];
dout.write(zeros);
dout.write(HANDSHAKE.length);
dout.write(HANDSHAKE);
// Handshake write - options
long myOptions = OPTION_EXTENSION;
// we can't handle HAVE_ALL or HAVE_NONE if we don't know the number of pieces
if (metainfo != null)
myOptions |= OPTION_FAST;
// FIXME get util here somehow
//if (util.getDHT() != null)
// myOptions |= OPTION_I2P_DHT;
dout.writeLong(myOptions);
// Handshake write - metainfo hash
byte[] shared_hash = metainfo.getInfoHash();
dout.write(shared_hash);
dout.write(infohash);
// Handshake write - peer id
dout.write(my_id);
dout.flush();
_log.debug("Wrote my shared hash and ID to " + toString());
if (_log.shouldLog(Log.DEBUG))
_log.debug("Wrote my shared hash and ID to " + toString());
// Handshake read - header
din = new DataInputStream(in);
byte b = din.readByte();
if (b != 19)
if (b != HANDSHAKE.length)
throw new IOException("Handshake failure, expected 19, got "
+ (b & 0xff) + " on " + sock);
byte[] bs = new byte[19];
byte[] bs = new byte[HANDSHAKE.length];
din.readFully(bs);
String bittorrentProtocol = new String(bs, "UTF-8");
if (!"BitTorrent protocol".equals(bittorrentProtocol))
if (!Arrays.equals(HANDSHAKE, bs))
throw new IOException("Handshake failure, expected "
+ "'Bittorrent protocol', got '"
+ bittorrentProtocol + "'");
+ "'BitTorrent protocol'");
// Handshake read - zeros
din.readFully(zeros);
// Handshake read - options
options = din.readLong();
// Handshake read - metainfo hash
bs = new byte[20];
din.readFully(bs);
if (!Arrays.equals(shared_hash, bs))
if (!Arrays.equals(infohash, bs))
throw new IOException("Unexpected MetaInfo hash");
// Handshake read - peer id
din.readFully(bs);
_log.debug("Read the remote side's hash and peerID fully from " + toString());
if (_log.shouldLog(Log.DEBUG))
_log.debug("Read the remote side's hash and peerID fully from " + toString());
if (DataHelper.eq(my_id, bs))
throw new IOException("Connected to myself");
if (options != 0) {
// send them something in runConnection() above
if (_log.shouldLog(Log.DEBUG))
_log.debug("Peer supports options 0x" + Long.toHexString(options) + ": " + toString());
}
return bs;
}
/** @since 0.9.21 */
public boolean supportsFast() {
return (options & OPTION_FAST) != 0;
}
/** @since 0.8.4 */
public Destination getDestination() {
if (sock == null)
return null;
return sock.getPeerDestination();
}
/**
* Shared state across all peers, callers must sync on returned object
* @return non-null
* @since 0.8.4
*/
public MagnetState getMagnetState() {
return magnetState;
}
/** @return could be null @since 0.8.4 */
public Map<String, BEValue> getHandshakeMap() {
return handshakeMap;
}
/**
* @param map non-null
* @since 0.8.4
*/
public void setHandshakeMap(Map<String, BEValue> map) {
handshakeMap = map;
BEValue bev = map.get("reqq");
if (bev != null) {
try {
int reqq = bev.getInt();
_maxPipeline = Math.min(PeerState.MAX_PIPELINE, Math.max(PeerState.MIN_PIPELINE, reqq));
} catch (InvalidBEncodingException ibee) {}
} else {
// BEP 10 "The default in libtorrent is 250"
_maxPipeline = PeerState.MAX_PIPELINE;
}
}
/**
* @return min of PeerState.MIN_PIPELINE, max of PeerState.MAX_PIPELINE
* @since 0.9.47
*/
public int getMaxPipeline() {
return _maxPipeline;
}
/** @since 0.8.4 */
public void sendExtension(int type, byte[] payload) {
PeerState s = state;
if (s != null)
s.out.sendExtension(type, payload);
}
/**
* Switch from magnet mode to normal mode
* @since 0.8.4
*/
public void setMetaInfo(MetaInfo meta) {
metainfo = meta;
PeerState s = state;
if (s != null)
s.setMetaInfo(meta);
}
public boolean isConnected()
{
return state != null;
......@@ -315,17 +499,32 @@ public class Peer implements Comparable
void disconnect()
{
if (!_disconnected.compareAndSet(false, true))
return;
PeerState s = state;
if (s != null)
{
// try to save partial piece
if (this.deregister) {
PeerListener p = s.listener;
if (p != null) {
List<Request> pcs = s.returnPartialPieces();
if (!pcs.isEmpty())
p.savePartialPieces(this, pcs);
// now covered by savePartialPieces
//p.markUnrequested(this);
}
}
state = null;
PeerConnectionIn in = s.in;
if (in != null)
in.disconnect();
PeerConnectionOut out = s.out;
if (out != null)
out.disconnect();
// this is blocking in streaming, so do this after closing the socket
// so it won't really block
//PeerConnectionOut out = s.out;
//if (out != null)
// out.disconnect();
PeerListener pl = s.listener;
if (pl != null)
pl.disconnected(this);
......@@ -339,6 +538,13 @@ public class Peer implements Comparable
_log.warn("Error disconnecting " + toString(), ioe);
}
}
if (s != null) {
// this is blocking in streaming, so do this after closing the socket
// so it won't really block
PeerConnectionOut out = s.out;
if (out != null)
out.disconnect();
}
}
/**
......@@ -351,6 +557,39 @@ public class Peer implements Comparable
s.havePiece(piece);
}
/**
* Tell the other side that we are no longer interested in any of
* the outstanding requests (if any) for this piece.
* @since 0.8.1
*/
void cancel(int piece) {
PeerState s = state;
if (s != null)
s.cancelPiece(piece);
}
/**
* Are we currently requesting the piece?
* @deprecated deadlocks
* @since 0.8.1
*/
@Deprecated
boolean isRequesting(int p) {
PeerState s = state;
return s != null && s.isRequesting(p);
}
/**
* Update the request queue.
* Call after adding wanted pieces.
* @since 0.8.1
*/
void request() {
PeerState s = state;
if (s != null)
s.addRequest();
}
/**
* Whether or not the peer is interested in pieces we have. Returns
* false if not connected.
......@@ -365,7 +604,9 @@ public class Peer implements Comparable
* Sets whether or not we are interested in pieces from this peer.
* Defaults to false. When interest is true and this peer unchokes
* us then we start downloading from it. Has no effect when not connected.
* @deprecated unused
*/
@Deprecated
public void setInteresting(boolean interest)
{
PeerState s = state;
......@@ -413,49 +654,265 @@ public class Peer implements Comparable
return (s == null) || s.choked;
}
/////// begin BandwidthListener interface ///////
/**
* Increment the counter.
* @since 0.8.4
*/
public void downloaded(int size) {
downloaded.addAndGet(size);
PeerState s = state;
if (s != null)
s.getBandwidthListener().downloaded(size);
}
/**
* Increment the counter.
* @since 0.8.4
*/
public void uploaded(int size) {
uploaded.addAndGet(size);
PeerState s = state;
if (s != null)
s.getBandwidthListener().uploaded(size);
}
/**
* Returns the number of bytes that have been downloaded.
* Can be reset to zero with <code>resetCounters()</code>/
* Can be reset to zero with <code>resetCounters()</code>
* which is called every CHECK_PERIOD by PeerCheckerTask.
*/
public long getDownloaded()
{
PeerState s = state;
return (s != null) ? s.downloaded : 0;
return downloaded.get();
}
/**
* Returns the number of bytes that have been uploaded.
* Can be reset to zero with <code>resetCounters()</code>/
* Can be reset to zero with <code>resetCounters()</code>
* which is called every CHECK_PERIOD by PeerCheckerTask.
*/
public long getUploaded()
{
return uploaded.get();
}
/**
* Returns the average rate in Bps
*/
public long getUploadRate()
{
return PeerCoordinator.getRate(uploaded_old);
}
public long getDownloadRate()
{
return PeerCoordinator.getRate(downloaded_old);
}
/**
* Should we send this many bytes?
* Do NOT call uploaded() after this.
* @since 0.9.62
*/
public boolean shouldSend(int size) {
PeerState s = state;
if (s != null) {
boolean rv = s.getBandwidthListener().shouldSend(size);
if (rv)
uploaded.addAndGet(size);
return rv;
}
return false;
}
/**
* Should we request this many bytes?
* @since 0.9.62
*/
public boolean shouldRequest(int size) {
PeerState s = state;
return (s != null) ? s.uploaded : 0;
if (s != null)
return s.getBandwidthListener().shouldRequest(this, size);
return false;
}
/**
* Resets the downloaded and uploaded counters to zero.
* Should we request this many bytes?
* @since 0.9.62
*/
public boolean shouldRequest(Peer peer, int size) {
if (peer != this)
return false;
PeerState s = state;
if (s != null)
return s.getBandwidthListener().shouldRequest(this, size);
return false;
}
/**
* Current limit in Bps
* @since 0.9.62
*/
public void resetCounters()
public long getUpBWLimit() {
PeerState s = state;
if (s != null)
return s.getBandwidthListener().getUpBWLimit();
return Integer.MAX_VALUE;
}
/**
* Is snark as a whole over its limit?
* @since 0.9.62
*/
public boolean overUpBWLimit()
{
PeerState s = state;
if (s != null)
return s.getBandwidthListener().overUpBWLimit();
return false;
}
/**
* Current limit in Bps
* @since 0.9.62
*/
public long getDownBWLimit() {
PeerState s = state;
if (s != null)
{
s.downloaded = 0;
s.uploaded = 0;
}
return s.getBandwidthListener().getDownBWLimit();
return Integer.MAX_VALUE;
}
/**
* Are we currently over the limit?
* @since 0.9.62
*/
public boolean overDownBWLimit() {
PeerState s = state;
if (s != null)
return s.getBandwidthListener().overDownBWLimit();
return false;
}
/**
* Push the total uploaded/downloaded onto a RATE_DEPTH deep stack
* Resets the downloaded and uploaded counters to zero.
*/
void setRateHistory() {
long up = uploaded.getAndSet(0);
PeerCoordinator.setRate(up, uploaded_old);
long down = downloaded.getAndSet(0);
PeerCoordinator.setRate(down, downloaded_old);
}
/////// end BandwidthListener interface ///////
public long getInactiveTime() {
PeerState s = state;
if (s != null) {
PeerConnectionIn in = s.in;
PeerConnectionOut out = s.out;
if (out != null)
return System.currentTimeMillis() - out.lastSent;
else
if (in != null && out != null) {
long now = System.currentTimeMillis();
return Math.max(now - out.lastSent, now - in.lastRcvd);
} else
return -1; //"state, no out";
} else {
return -1; //"no state";
}
}
/** @since 0.9.36 */
public long getMaxInactiveTime() {
return isCompleted() && !isInteresting() ?
PeerCoordinator.MAX_SEED_INACTIVE :
PeerCoordinator.MAX_INACTIVE;
}
/**
* Send keepalive
*/
public void keepAlive()
{
PeerState s = state;
if (s != null)
s.keepAlive();
}
/**
* Retransmit outstanding requests if necessary
*/
public void retransmitRequests()
{
PeerState s = state;
if (s != null)
s.retransmitRequests();
}
/**
* Return how much the peer has
* @return number of completed pieces (not bytes)
*/
public int completed()
{
PeerState s = state;
if (s == null || s.bitfield == null)
return 0;
return s.bitfield.count();
}
/**
* Return if a peer is a seeder
*/
public boolean isCompleted()
{
PeerState s = state;
if (s == null || s.bitfield == null)
return false;
return s.bitfield.complete();
}
/** @since 0.9.31 */
int getTotalCommentsSent() {
return _totalCommentsSent;
}
/** @since 0.9.31 */
void setTotalCommentsSent(int count) {
_totalCommentsSent = count;
}
/**
* @return false
* @since 0.9.49
*/
public boolean isWebPeer() {
return false;
}
/**
* when did handshake complete?
* @since 0.9.63
*/
public long getWhenConnected() {
return connected;
}
/**
* when did we last send pex peers?
* @since 0.9.63
*/
public long getPexLastSent() {
return pexLastSent;
}
/**
* when did we last send pex peers?
* @since 0.9.63
*/
public void setPexLastSent(long now) {
pexLastSent = now;
}
}
......@@ -20,10 +20,13 @@
package org.klomp.snark;
import java.io.*;
import java.net.*;
import java.util.Iterator;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.SequenceInputStream;
import net.i2p.I2PAppContext;
import net.i2p.client.streaming.I2PSocket;
import net.i2p.data.Base64;
import net.i2p.data.DataHelper;
......@@ -35,12 +38,16 @@ import net.i2p.util.Log;
* protocol connection. The PeerAcceptor will then create a new peer
* if the PeerCoordinator wants more peers.
*/
public class PeerAcceptor
class PeerAcceptor
{
private static final Log _log = new Log(PeerAcceptor.class);
private final Log _log = I2PAppContext.getGlobalContext().logManager().getLog(PeerAcceptor.class);
private final PeerCoordinator coordinator;
final PeerCoordinatorSet coordinators;
/** shorten timeout while reading handshake */
private static final long HASH_READ_TIMEOUT = 45*1000;
public PeerAcceptor(PeerCoordinator coordinator)
{
this.coordinator = coordinator;
......@@ -64,29 +71,39 @@ public class PeerAcceptor
// talk about, and we can just look for that in our list of active torrents.
byte peerInfoHash[] = null;
if (in instanceof BufferedInputStream) {
// multitorrent
in.mark(LOOKAHEAD_SIZE);
peerInfoHash = readHash(in);
long timeout = socket.getReadTimeout();
socket.setReadTimeout(HASH_READ_TIMEOUT);
try {
peerInfoHash = readHash(in);
} catch (IOException ioe) {
// unique exception so ConnectionAcceptor can blame the peer
throw new ProtocolException(ioe.toString());
}
socket.setReadTimeout(timeout);
in.reset();
} else {
// is this working right?
// Single torrent - is this working right?
try {
peerInfoHash = readHash(in);
_log.info("infohash read from " + socket.getPeerDestination().calculateHash().toBase64()
+ ": " + Base64.encode(peerInfoHash));
if (_log.shouldLog(Log.INFO))
_log.info("infohash read from " + socket.getPeerDestination().calculateHash().toBase64()
+ ": " + Base64.encode(peerInfoHash));
} catch (IOException ioe) {
_log.info("Unable to read the infohash from " + socket.getPeerDestination().calculateHash().toBase64());
if (_log.shouldLog(Log.INFO))
_log.info("Unable to read the infohash from " + socket.getPeerDestination().calculateHash().toBase64());
throw ioe;
}
in = new SequenceInputStream(new ByteArrayInputStream(peerInfoHash), in);
}
if (coordinator != null) {
// single torrent capability
MetaInfo meta = coordinator.getMetaInfo();
if (DataHelper.eq(meta.getInfoHash(), peerInfoHash)) {
if (DataHelper.eq(coordinator.getInfoHash(), peerInfoHash)) {
if (coordinator.needPeers())
{
Peer peer = new Peer(socket, in, out, coordinator.getID(),
coordinator.getMetaInfo());
coordinator.getInfoHash(), coordinator.getMetaInfo());
coordinator.addPeer(peer);
}
else
......@@ -94,26 +111,24 @@ public class PeerAcceptor
} else {
// its for another infohash, but we are only single torrent capable. b0rk.
throw new IOException("Peer wants another torrent (" + Base64.encode(peerInfoHash)
+ ") while we only support (" + Base64.encode(meta.getInfoHash()) + ")");
+ ") while we only support (" + Base64.encode(coordinator.getInfoHash()) + ")");
}
} else {
// multitorrent capable, so lets see what we can handle
for (Iterator iter = coordinators.iterator(); iter.hasNext(); ) {
PeerCoordinator cur = (PeerCoordinator)iter.next();
MetaInfo meta = cur.getMetaInfo();
if (DataHelper.eq(meta.getInfoHash(), peerInfoHash)) {
PeerCoordinator cur = coordinators.get(peerInfoHash);
if (cur != null) {
if (DataHelper.eq(cur.getInfoHash(), peerInfoHash)) {
if (cur.needPeers())
{
Peer peer = new Peer(socket, in, out, cur.getID(),
cur.getMetaInfo());
cur.getInfoHash(), cur.getMetaInfo());
cur.addPeer(peer);
return;
}
else
{
if (_log.shouldLog(Log.DEBUG))
_log.debug("Rejecting new peer for " + cur.snark.torrent);
_log.debug("Rejecting new peer for " + cur.getName());
socket.close();
return;
}
......@@ -125,21 +140,49 @@ public class PeerAcceptor
}
}
private static final int LOOKAHEAD_SIZE = "19".length() +
"BitTorrent protocol".length() +
// The BitTorrent handshake preamble: a one-byte length (19) followed by
// the protocol identifier string, per BEP 3.
private static final String PROTO_STR = "BitTorrent protocol";
private static final int PROTO_STR_LEN = PROTO_STR.length();
// length byte + protocol string = 20 bytes
private static final int PROTO_LEN = PROTO_STR_LEN + 1;
// Expected value of each of the first PROTO_LEN bytes read from a peer,
// stored as ints for direct comparison with InputStream.read() results.
private static final int[] PROTO = new int[PROTO_LEN];
static {
PROTO[0] = PROTO_STR_LEN;
for (int i = 0; i < PROTO_STR_LEN; i++) {
PROTO[i+1] = PROTO_STR.charAt(i);
}
}
/** 48 */
private static final int LOOKAHEAD_SIZE = PROTO_LEN +
8 + // blank, reserved
20; // infohash
/**
* Read ahead to the infohash, throwing an exception if there isn't enough data
* Read ahead to the infohash, throwing an exception if there isn't enough data.
* Also check the first 20 bytes for the correct protocol here and throw IOE if bad,
* so we don't hang waiting for 48 bytes if it's not a bittorrent client.
* The 20 bytes are checked again in Peer.handshake().
*/
private byte[] readHash(InputStream in) throws IOException {
byte buf[] = new byte[LOOKAHEAD_SIZE];
private static byte[] readHash(InputStream in) throws IOException {
for (int i = 0; i < PROTO_LEN; i++) {
int b = in.read();
if (b != PROTO[i])
throw new IOException("Bad protocol 0x" + Integer.toHexString(b) + " at byte " + i);
}
DataHelper.skip(in, 8);
byte buf[] = new byte[20];
int read = DataHelper.read(in, buf);
if (read != buf.length)
throw new IOException("Unable to read the hash (read " + read + ")");
byte rv[] = new byte[20];
System.arraycopy(buf, buf.length-rv.length-1, rv, 0, rv.length);
return rv;
return buf;
}
/**
 * A unique exception type so the ConnectionAcceptor can distinguish
 * non-BitTorrent (bad-protocol) connections from ordinary I/O failures.
 * @since 0.9.1
 */
public static class ProtocolException extends IOException {
public ProtocolException(String s) {
super(s);
}
}
}
......@@ -20,127 +20,182 @@
package org.klomp.snark;
import java.util.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import net.i2p.data.DataHelper;
import net.i2p.util.Log;
import org.klomp.snark.dht.DHT;
/**
* TimerTask that checks for good/bad up/downloader. Works together
* with the PeerCoordinator to select which Peers get (un)choked.
*/
class PeerCheckerTask extends TimerTask
class PeerCheckerTask implements Runnable
{
private final long KILOPERSECOND = 1024*(PeerCoordinator.CHECK_PERIOD/1000);
private static final long KILOPERSECOND = 1024*(PeerCoordinator.CHECK_PERIOD/1000);
private final PeerCoordinator coordinator;
private final I2PSnarkUtil _util;
private final Log _log;
private final Random random;
private int _runCount;
PeerCheckerTask(PeerCoordinator coordinator)
PeerCheckerTask(I2PSnarkUtil util, PeerCoordinator coordinator)
{
_util = util;
_log = util.getContext().logManager().getLog(PeerCheckerTask.class);
random = util.getContext().random();
this.coordinator = coordinator;
}
public void run()
{
synchronized(coordinator.peers)
{
_runCount++;
List<Peer> peerList = coordinator.peerList();
if (peerList.isEmpty() || coordinator.halted()) {
coordinator.setRateHistory(0, 0);
return;
}
// Calculate total uploading and worst downloader.
long worstdownload = Long.MAX_VALUE;
Peer worstDownloader = null;
int peers = 0;
int uploaders = 0;
int downloaders = 0;
int interested = 0;
int interesting = 0;
int choking = 0;
int choked = 0;
int interestedUploaders = 0;
int removedCount = 0;
long uploaded = 0;
long downloaded = 0;
// Keep track of peers we remove now,
// we will add them back to the end of the list.
List removed = new ArrayList();
Iterator it = coordinator.peers.iterator();
while (it.hasNext())
{
Peer peer = (Peer)it.next();
List<Peer> removed = new ArrayList<Peer>();
int uploadLimit = coordinator.allowedUploaders();
boolean overBWLimit = coordinator.overUpBWLimit();
if (_log.shouldLog(Log.DEBUG))
_log.debug("START peers: " + peerList.size() + " uploaders: " + coordinator.getUploaders() +
" interested: " + coordinator.getInterestedUploaders() +
" limit: " + uploadLimit + " overBW? " + overBWLimit);
DHT dht = _util.getDHT();
boolean fetchComments = _util.utCommentsEnabled();
int i = 0;
for (Peer peer : peerList) {
i++;
// Remove dying peers
if (!peer.isConnected())
{
it.remove();
coordinator.removePeerFromPieces(peer);
coordinator.peerCount = coordinator.peers.size();
// This was just a failsafe, right?
//it.remove();
//coordinator.removePeerFromPieces(peer);
//coordinator.peerCount = coordinator.peers.size();
continue;
}
peers++;
if (peer.getInactiveTime() > peer.getMaxInactiveTime()) {
if (_log.shouldLog(Log.WARN))
_log.warn("Disconnecting peer idle " +
DataHelper.formatDuration(peer.getInactiveTime()) + ": " + peer);
peer.disconnect();
continue;
}
if (!peer.isChoking())
// we only count choking AND interested, so as not to steal a slot
// from some other torrent
if (peer.isInterested() && !peer.isChoking())
uploaders++;
if (!peer.isChoked() && peer.isInteresting())
downloaders++;
if (peer.isInterested())
interested++;
if (peer.isInteresting())
interesting++;
if (peer.isChoking())
choking++;
if (peer.isChoked())
choked++;
// XXX - We should calculate the up/download rate a bit
// more intelligently
long upload = peer.getUploaded();
uploaded += upload;
long download = peer.getDownloaded();
downloaded += download;
peer.resetCounters();
peer.setRateHistory();
if (Snark.debug >= Snark.DEBUG)
{
Snark.debug(peer + ":", Snark.DEBUG);
Snark.debug(" ul: " + upload/KILOPERSECOND
+ " dl: " + download/KILOPERSECOND
+ " i: " + peer.isInterested()
+ " I: " + peer.isInteresting()
+ " c: " + peer.isChoking()
+ " C: " + peer.isChoked(),
Snark.DEBUG);
}
if (_log.shouldLog(Log.DEBUG)) {
_log.debug(peer + ":"
+ " ul: " + upload*1024/KILOPERSECOND
+ " dl: " + download*1024/KILOPERSECOND
+ " i: " + peer.isInterested()
+ " I: " + peer.isInteresting()
+ " c: " + peer.isChoking()
+ " C: " + peer.isChoked());
}
// Choke a percentage of them rather than all so it isn't so drastic...
// choke 3/8 of the time when seeding and 1/4 when leeching
boolean overBWLimitChoke = upload > 0 &&
((overBWLimit && (random.nextInt(8) > (coordinator.completed() ? 4 : 5))) ||
(coordinator.overUpBWLimit(uploaded)));
// If we are at our max uploaders and we have lots of other
// interested peers try to make some room.
// (Note use of coordinator.uploaders)
if (coordinator.uploaders >= PeerCoordinator.MAX_UPLOADERS
&& interested > PeerCoordinator.MAX_UPLOADERS
int cup = coordinator.getUploaders();
if (((cup == uploadLimit
&& coordinator.getInterestedAndChoking() > 0)
|| cup > uploadLimit
|| overBWLimitChoke)
&& !peer.isChoking())
{
// Check if it still wants pieces from us.
if (!peer.isInterested())
{
if (Snark.debug >= Snark.INFO)
Snark.debug("Choke uninterested peer: " + peer,
Snark.INFO);
// Note that we only choke if we are over our limits,
// so a peer may remain unchoked even if uninterested.
if (_log.shouldLog(Log.DEBUG))
_log.debug("Choke uninterested peer: " + peer);
peer.setChoking(true);
uploaders--;
coordinator.uploaders--;
coordinator.decrementUploaders(false);
// Put it at the back of the list
it.remove();
removed.add(peer);
}
else if (peer.isChoked())
else if (overBWLimitChoke)
{
if (_log.shouldLog(Log.DEBUG))
_log.debug("BW limit (" + upload + "/" + uploaded + "), choke peer: " + peer);
peer.setChoking(true);
uploaders--;
interestedUploaders--;
coordinator.decrementUploaders(true);
removedCount++;
// Put it at the back of the list for fairness, even though we won't be unchoking this time
removed.add(peer);
}
else if (peer.isInteresting() && peer.isChoked())
{
// If they are choking us make someone else a downloader
if (Snark.debug >= Snark.DEBUG)
Snark.debug("Choke choking peer: " + peer, Snark.DEBUG);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Choke choking peer: " + peer);
peer.setChoking(true);
uploaders--;
coordinator.uploaders--;
interestedUploaders--;
coordinator.decrementUploaders(true);
removedCount++;
// Put it at the back of the list
removed.add(peer);
}
else if (!peer.isInteresting() && !coordinator.completed() &&
// give new peers a better chance to get their first two pieces
(peer.completed() >= 2 || random.nextInt(4) == 0))
{
// If they aren't interesting make someone else a downloader
if (_log.shouldLog(Log.DEBUG))
_log.debug("Choke uninteresting peer: " + peer);
peer.setChoking(true);
uploaders--;
interestedUploaders--;
coordinator.decrementUploaders(true);
removedCount++;
// Put it at the back of the list
it.remove();
removed.add(peer);
}
else if (peer.isInteresting()
......@@ -148,57 +203,119 @@ class PeerCheckerTask extends TimerTask
&& download == 0)
{
// We are downloading but didn't receive anything...
if (Snark.debug >= Snark.DEBUG)
Snark.debug("Choke downloader that doesn't deliver:"
+ peer, Snark.DEBUG);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Choke downloader that doesn't deliver: " + peer);
peer.setChoking(true);
uploaders--;
coordinator.uploaders--;
interestedUploaders--;
coordinator.decrementUploaders(true);
removedCount++;
// Put it at the back of the list
it.remove();
removed.add(peer);
}
else if (!peer.isChoking() && download < worstdownload)
else if (peer.isInteresting() && !peer.isChoked() &&
download < worstdownload)
{
// Make sure download is good if we are uploading
worstdownload = download;
worstDownloader = peer;
}
else if (upload < worstdownload && coordinator.completed() &&
// give new peers a better chance to get their first four pieces
(peer.completed() >= 4 || random.nextInt(8) == 0))
{
// Make sure upload is good if we are seeding
worstdownload = upload;
worstDownloader = peer;
}
}
}
peer.retransmitRequests();
// send PEX, about every 12 minutes
if (((_runCount + i) % 17) == 0 && !peer.isCompleted())
coordinator.sendPeers(peer);
// send Comment Request, about every 30 minutes
if (fetchComments && ((_runCount + i) % 47) == 0)
coordinator.sendCommentReq(peer);
// cheap failsafe for seeds connected to seeds, stop pinging and hopefully
// the inactive checker (above) will eventually disconnect it
if (coordinator.getNeededLength() > 0 || !peer.isCompleted())
peer.keepAlive();
// announce them to local tracker (TrackerClient does this too)
if (dht != null && (_runCount % 5) == 0) {
dht.announce(coordinator.getInfoHash(), peer.getPeerID().getDestHash(),
peer.isCompleted());
}
} // for peer
// Resync actual uploaders value
// (can shift a bit by disconnecting peers)
coordinator.uploaders = uploaders;
coordinator.setUploaders(uploaders, interestedUploaders);
// Remove the worst downloader if needed.
if (uploaders >= PeerCoordinator.MAX_UPLOADERS
&& interested > PeerCoordinator.MAX_UPLOADERS
// Remove the worst downloader if needed. (uploader if seeding)
if (((uploaders == uploadLimit
&& coordinator.getInterestedAndChoking() > 0)
|| uploaders > uploadLimit)
&& worstDownloader != null)
{
if (Snark.debug >= Snark.DEBUG)
Snark.debug("Choke worst downloader: " + worstDownloader,
Snark.DEBUG);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Choke worst downloader: " + worstDownloader);
worstDownloader.setChoking(true);
coordinator.uploaders--;
coordinator.decrementUploaders(worstDownloader.isInterested());
removedCount++;
// Put it at the back of the list
coordinator.peers.remove(worstDownloader);
coordinator.peerCount = coordinator.peers.size();
removed.add(worstDownloader);
}
// Optimistically unchoke a peer
coordinator.unchokePeer();
// Put peers back at the end of the list that we removed earlier.
coordinator.peers.addAll(removed);
coordinator.peerCount = coordinator.peers.size();
}
if (coordinator.halted()) {
cancel();
}
boolean coordOver = coordinator.overUpBWLimit(uploaded);
synchronized (coordinator.peers) {
if ((!overBWLimit) && !coordOver) {
// Optimistically unchoke a peer
// must be called inside synch
coordinator.unchokePeer();
}
// Put peers back at the end of the list that we removed earlier.
for(Peer peer : removed) {
if (coordinator.peers.remove(peer))
coordinator.peers.add(peer);
}
}
coordinator.addInterestedAndChoking(removedCount);
// store the rates
coordinator.setRateHistory(uploaded, downloaded);
if (_log.shouldLog(Log.DEBUG))
_log.debug("END peers: " + peerList.size() + " uploaders: " + uploaders +
" interested: " + interestedUploaders);
// close out unused files, but we don't need to do it every time
Storage storage = coordinator.getStorage();
if (storage != null) {
// The more files a torrent has, the more often we call the cleaner,
// to keep from running out of FDs
int files = storage.getFileCount();
int skip;
if (files == 1)
skip = 6;
else if (files <= 4)
skip = 4;
else if (files <= 20)
skip = 3;
else if (files <= 50)
skip = 2;
else
skip = 1;
if ((_runCount % skip) == 0)
storage.cleanRAFs();
}
// announce ourselves to local tracker (TrackerClient does this too)
if (dht != null && (_runCount % 16) == 0) {
dht.announce(coordinator.getInfoHash(), coordinator.completed());
}
}
}
......@@ -20,37 +20,53 @@
package org.klomp.snark;
import java.io.*;
import java.net.*;
import java.util.*;
import java.io.DataInputStream;
import java.io.IOException;
import net.i2p.I2PAppContext;
import net.i2p.util.Log;
class PeerConnectionIn implements Runnable
{
private Log _log = new Log(PeerConnectionIn.class);
private final Log _log = I2PAppContext.getGlobalContext().logManager().getLog(PeerConnectionIn.class);
private final Peer peer;
private final DataInputStream din;
private Thread thread;
private boolean quit;
// The max length of a complete message in bytes.
// The biggest is the piece message, for which the length is the
// request size (32K) plus 9. (we could also check if Storage.MAX_PIECES / 8
// in the bitfield message is bigger but it's currently 5000/8 = 625 so don't bother)
private static final int MAX_MSG_SIZE = Math.max(PeerState.PARTSIZE + 9,
MagnetState.CHUNK_SIZE + 100); // 100 for the ext msg dictionary
private volatile Thread thread;
private volatile boolean quit;
long lastRcvd;
public PeerConnectionIn(Peer peer, DataInputStream din)
{
this.peer = peer;
this.din = din;
quit = false;
lastRcvd = System.currentTimeMillis();
}
void disconnect()
{
if (quit == true)
if (quit)
return;
quit = true;
Thread t = thread;
if (t != null)
t.interrupt();
if (din != null) {
try {
din.close();
} catch (IOException ioe) {
//_log.warn("Error closing the stream from " + peer, ioe);
}
}
}
public void run()
......@@ -58,111 +74,172 @@ class PeerConnectionIn implements Runnable
thread = Thread.currentThread();
try
{
PeerState ps = peer.state;
while (!quit && ps != null)
while (!quit)
{
final PeerState ps = peer.state;
if (ps == null)
break;
// Common variables used for some messages.
int piece;
int begin;
int len;
// Wait till we hear something...
// The length of a complete message in bytes.
int i = din.readInt();
if (i < 0)
lastRcvd = System.currentTimeMillis();
if (i < 0 || i > MAX_MSG_SIZE)
throw new IOException("Unexpected length prefix: " + i);
if (i == 0)
{
ps.keepAliveMessage();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received keepalive from " + peer + " on " + peer.metainfo.getName());
_log.debug("Received keepalive from " + peer);
ps.keepAliveMessage();
continue;
}
byte b = din.readByte();
Message m = new Message();
m.type = b;
switch (b)
{
case 0:
ps.chokeMessage(true);
case Message.CHOKE:
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received choke from " + peer + " on " + peer.metainfo.getName());
_log.debug("Received choke from " + peer);
ps.chokeMessage(true);
break;
case 1:
ps.chokeMessage(false);
case Message.UNCHOKE:
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received unchoke from " + peer + " on " + peer.metainfo.getName());
_log.debug("Received unchoke from " + peer);
ps.chokeMessage(false);
break;
case 2:
ps.interestedMessage(true);
case Message.INTERESTED:
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received interested from " + peer + " on " + peer.metainfo.getName());
_log.debug("Received interested from " + peer);
ps.interestedMessage(true);
break;
case 3:
ps.interestedMessage(false);
case Message.UNINTERESTED:
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received not interested from " + peer + " on " + peer.metainfo.getName());
_log.debug("Received not interested from " + peer);
ps.interestedMessage(false);
break;
case 4:
case Message.HAVE:
piece = din.readInt();
ps.haveMessage(piece);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received havePiece(" + piece + ") from " + peer + " on " + peer.metainfo.getName());
_log.debug("Received havePiece(" + piece + ") from " + peer);
ps.haveMessage(piece);
break;
case 5:
case Message.BITFIELD:
byte[] bitmap = new byte[i-1];
din.readFully(bitmap);
ps.bitfieldMessage(bitmap);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received bitmap from " + peer + " on " + peer.metainfo.getName() + ": size=" + (i-1) + ": " + ps.bitfield);
_log.debug("Received bitmap from " + peer + ": size=" + (i-1) /* + ": " + ps.bitfield */ );
ps.bitfieldMessage(bitmap);
break;
case 6:
case Message.REQUEST:
piece = din.readInt();
begin = din.readInt();
len = din.readInt();
ps.requestMessage(piece, begin, len);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received request(" + piece + "," + begin + ") from " + peer + " on " + peer.metainfo.getName());
_log.debug("Received request(" + piece + "," + begin + ") from " + peer);
ps.requestMessage(piece, begin, len);
break;
case 7:
case Message.PIECE:
piece = din.readInt();
begin = din.readInt();
len = i-9;
Request req = ps.getOutstandingRequest(piece, begin, len);
byte[] piece_bytes;
if (req != null)
{
piece_bytes = req.bs;
din.readFully(piece_bytes, begin, len);
ps.pieceMessage(req);
req.read(din, peer);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received data(" + piece + "," + begin + ") from " + peer + " on " + peer.metainfo.getName());
_log.debug("Received data(" + piece + "," + begin + ") from " + peer);
ps.pieceMessage(req);
}
else
{
// XXX - Consume but throw away afterwards.
piece_bytes = new byte[len];
din.readFully(piece_bytes);
int rcvd = din.skipBytes(len);
if (rcvd != len)
throw new IOException("EOF reading unwanted data");
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received UNWANTED data(" + piece + "," + begin + ") from " + peer + " on " + peer.metainfo.getName());
_log.debug("Received UNWANTED data(" + piece + "," + begin + ") from " + peer);
}
break;
case 8:
case Message.CANCEL:
piece = din.readInt();
begin = din.readInt();
len = din.readInt();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received cancel(" + piece + "," + begin + ") from " + peer);
ps.cancelMessage(piece, begin, len);
break;
case Message.PORT:
int port = din.readUnsignedShort();
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received port message from " + peer);
ps.portMessage(port);
break;
case Message.EXTENSION:
int id = din.readUnsignedByte();
byte[] payload = new byte[i-2];
din.readFully(payload);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received extension message from " + peer);
ps.extensionMessage(id, payload);
break;
// fast extensions below here
case Message.SUGGEST:
piece = din.readInt();
ps.suggestMessage(piece);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received suggest(" + piece + ") from " + peer);
break;
case Message.HAVE_ALL:
ps.haveMessage(true);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received cancel(" + piece + "," + begin + ") from " + peer + " on " + peer.metainfo.getName());
_log.debug("Received have_all from " + peer);
break;
case Message.HAVE_NONE:
ps.haveMessage(false);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received have_none from " + peer);
break;
case Message.REJECT:
piece = din.readInt();
begin = din.readInt();
len = din.readInt();
ps.rejectMessage(piece, begin, len);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received reject(" + piece + ',' + begin + ',' + len + ") from " + peer);
break;
case Message.ALLOWED_FAST:
piece = din.readInt();
ps.allowedFastMessage(piece);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received allowed_fast(" + piece + ") from " + peer);
break;
default:
byte[] bs = new byte[i-1];
din.readFully(bs);
ps.unknownMessage(b, bs);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Received unknown message from " + peer + " on " + peer.metainfo.getName());
_log.debug("Received unknown message from " + peer);
}
}
}
......@@ -172,11 +249,9 @@ class PeerConnectionIn implements Runnable
if (_log.shouldLog(Log.INFO))
_log.info("IOError talking with " + peer, ioe);
}
catch (Throwable t)
catch (RuntimeException t)
{
_log.error("Error talking with " + peer, t);
if (t instanceof OutOfMemoryError)
throw (OutOfMemoryError)t;
}
finally
{
......
......@@ -20,17 +20,23 @@
package org.klomp.snark;
import java.io.*;
import java.net.*;
import java.util.*;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicLong;
import net.i2p.util.I2PThread;
import net.i2p.I2PAppContext;
import net.i2p.util.I2PAppThread;
import net.i2p.util.Log;
import net.i2p.util.SimpleTimer;
//import net.i2p.util.SimpleTimer;
class PeerConnectionOut implements Runnable
{
private Log _log = new Log(PeerConnectionOut.class);
private final Log _log = I2PAppContext.getGlobalContext().logManager().getLog(PeerConnectionOut.class);
private final Peer peer;
private final DataOutputStream dout;
......@@ -38,10 +44,10 @@ class PeerConnectionOut implements Runnable
private boolean quit;
// Contains Messages.
private List sendQueue = new ArrayList();
private final BlockingQueue<Message> sendQueue = new LinkedBlockingQueue<Message>();
private static long __id = 0;
private long _id;
private static final AtomicLong __id = new AtomicLong();
private final long _id;
long lastSent;
......@@ -49,45 +55,57 @@ class PeerConnectionOut implements Runnable
{
this.peer = peer;
this.dout = dout;
_id = ++__id;
_id = __id.incrementAndGet();
lastSent = System.currentTimeMillis();
quit = false;
}
public void startup() {
thread = new I2PThread(this, "Snark sender " + _id + ": " + peer);
thread = new I2PAppThread(this, "Snark sender " + _id + ": " + peer);
thread.start();
}
/**
* Continuously monitors for more outgoing messages that have to be sent.
* Stops if quit is true of an IOException occurs.
* Stops if quit is true or an IOException occurs.
*/
public void run()
{
try
{
boolean shouldThrottleRequests = false;
while (!quit && peer.isConnected())
{
Message m = null;
PeerState state = null;
boolean shouldFlush;
synchronized(sendQueue)
{
while (!quit && peer.isConnected() && sendQueue.isEmpty())
shouldFlush = !quit && peer.isConnected() && sendQueue.isEmpty();
}
if (shouldFlush)
// Make sure everything will reach the other side.
// flush while not holding lock, could take a long time
dout.flush();
synchronized(sendQueue)
{
while (!quit && peer.isConnected() && (shouldThrottleRequests || sendQueue.isEmpty()))
{
try
{
// Make sure everything will reach the other side.
dout.flush();
// don't flush while holding lock, could take a long time
// dout.flush();
// Wait till more data arrives.
sendQueue.wait(60*1000);
sendQueue.wait(shouldThrottleRequests ? 5000 : 60*1000);
}
catch (InterruptedException ie)
{
/* ignored */
}
shouldThrottleRequests = false;
}
state = peer.state;
if (!quit && state != null && peer.isConnected())
......@@ -101,53 +119,101 @@ class PeerConnectionOut implements Runnable
// And remove piece messages if we are choking.
// this should get fixed for starvation
Iterator it = sendQueue.iterator();
Iterator<Message> it = sendQueue.iterator();
while (m == null && it.hasNext())
{
Message nm = (Message)it.next();
Message nm = it.next();
if (nm.type == Message.PIECE)
{
if (state.choking) {
it.remove();
SimpleTimer.getInstance().removeEvent(nm.expireEvent);
if (peer.supportsFast()) {
Message r = new Message(Message.REJECT, nm.piece, nm.begin, nm.length);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send " + peer + ": " + r);
r.sendMessage(dout);
}
}
nm = null;
}
else if (nm.type == Message.REQUEST && state.choked)
else if (nm.type == Message.REQUEST)
{
it.remove();
SimpleTimer.getInstance().removeEvent(nm.expireEvent);
nm = null;
if (state.choked) {
it.remove();
nm = null;
} else if (shouldThrottleRequests) {
// previous request in queue throttled, skip this one too
if (_log.shouldWarn())
_log.warn("Additional throttle: " + nm + " to " + peer);
nm = null;
} else if (!peer.shouldRequest(nm.length)) {
// request throttle, skip this and all others in this loop
if (_log.shouldWarn())
_log.warn("Throttle: " + nm + " to " + peer);
shouldThrottleRequests = true;
nm = null;
}
}
if (m == null && nm != null)
if (nm != null)
{
m = nm;
SimpleTimer.getInstance().removeEvent(nm.expireEvent);
it.remove();
}
}
if (m == null && sendQueue.size() > 0) {
m = (Message)sendQueue.remove(0);
SimpleTimer.getInstance().removeEvent(m.expireEvent);
if (m == null) {
m = sendQueue.peek();
if (m != null && m.type == Message.PIECE) {
// bandwidth limiting
// Pieces are the last thing in the queue to be sent so we can
// simply wait right here and then loop
if (!peer.shouldSend(Math.min(m.length, PeerState.PARTSIZE))) {
if (_log.shouldWarn())
_log.warn("Throttle: " + m + " to " + peer);
try {
sendQueue.wait(5000);
} catch (InterruptedException ie) {}
continue;
}
} else if (m != null && m.type == Message.REQUEST) {
if (shouldThrottleRequests)
continue;
}
m = sendQueue.poll();
}
}
}
if (m != null)
{
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send " + peer + ": " + m + " on " + peer.metainfo.getName());
m.sendMessage(dout);
_log.debug("Send " + peer + ": " + m);
// This can block for quite a while.
// To help get slow peers going, and track the bandwidth better,
// move this _after_ state.uploaded() and see how it works.
//m.sendMessage(dout);
lastSent = System.currentTimeMillis();
// Remove all piece messages after sending a choke message.
// FIXME this causes REJECT messages to be sent before sending the CHOKE;
// BEP 6 recommends sending them after.
if (m.type == Message.CHOKE)
removeMessage(Message.PIECE);
// XXX - Should also register overhead...
if (m.type == Message.PIECE)
state.uploaded(m.len);
// Don't let other clients requesting big chunks get an advantage
// when we are seeding;
// only count the rest of the upload after sendMessage().
int remainder = 0;
if (m.type == Message.PIECE) {
// first PARTSIZE was signalled in shouldSend() above
if (m.len > PeerState.PARTSIZE)
remainder = m.len - PeerState.PARTSIZE;
}
m.sendMessage(dout);
if (remainder > 0)
peer.uploaded(remainder);
m = null;
}
}
......@@ -183,13 +249,13 @@ class PeerConnectionOut implements Runnable
thread.interrupt();
sendQueue.clear();
sendQueue.notify();
sendQueue.notifyAll();
}
if (dout != null) {
try {
dout.close();
} catch (IOException ioe) {
_log.warn("Error closing the stream to " + peer, ioe);
//_log.warn("Error closing the stream to " + peer, ioe);
}
}
}
......@@ -200,16 +266,17 @@ class PeerConnectionOut implements Runnable
*/
private void addMessage(Message m)
{
SimpleTimer.getInstance().addEvent(new RemoveTooSlow(m), SEND_TIMEOUT);
synchronized(sendQueue)
{
sendQueue.add(m);
sendQueue.offer(m);
sendQueue.notifyAll();
}
}
/** remove messages not sent in 3m */
private static final int SEND_TIMEOUT = 3*60*1000;
/*****
private class RemoveTooSlow implements SimpleTimer.TimedEvent {
private Message _m;
public RemoveTooSlow(Message m) {
......@@ -227,6 +294,7 @@ class PeerConnectionOut implements Runnable
_log.info("Took too long to send " + _m + " to " + peer);
}
}
*****/
/**
* Removes a particular message type from the queue.
......@@ -240,15 +308,22 @@ class PeerConnectionOut implements Runnable
boolean removed = false;
synchronized(sendQueue)
{
Iterator it = sendQueue.iterator();
Iterator<Message> it = sendQueue.iterator();
while (it.hasNext())
{
Message m = (Message)it.next();
if (m.type == type)
{
Message m = it.next();
if (m.type == type) {
it.remove();
removed = true;
}
if (type == Message.PIECE && peer.supportsFast()) {
Message r = new Message(Message.REJECT, m.piece, m.begin, m.length);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Send " + peer + ": " + r);
try {
r.sendMessage(dout);
} catch (IOException ioe) {}
}
}
}
sendQueue.notifyAll();
}
......@@ -257,9 +332,14 @@ class PeerConnectionOut implements Runnable
void sendAlive()
{
Message m = new Message();
m.type = Message.KEEP_ALIVE;
addMessage(m);
synchronized(sendQueue)
{
if(sendQueue.isEmpty()) {
Message m = new Message(Message.KEEP_ALIVE);
sendQueue.offer(m);
}
sendQueue.notifyAll();
}
}
void sendChoke(boolean choke)
......@@ -272,11 +352,7 @@ class PeerConnectionOut implements Runnable
: Message.CHOKE;
if (!removeMessage(inverseType))
{
Message m = new Message();
if (choke)
m.type = Message.CHOKE;
else
m.type = Message.UNCHOKE;
Message m = new Message(choke ? Message.CHOKE : Message.UNCHOKE);
addMessage(m);
}
}
......@@ -290,11 +366,7 @@ class PeerConnectionOut implements Runnable
: Message.INTERESTED;
if (!removeMessage(inverseType))
{
Message m = new Message();
if (interest)
m.type = Message.INTERESTED;
else
m.type = Message.UNINTERESTED;
Message m = new Message(interest ? Message.INTERESTED : Message.UNINTERESTED);
addMessage(m);
}
}
......@@ -302,66 +374,157 @@ class PeerConnectionOut implements Runnable
void sendHave(int piece)
{
Message m = new Message();
m.type = Message.HAVE;
m.piece = piece;
Message m = new Message(Message.HAVE, piece);
addMessage(m);
}
void sendBitfield(BitField bitfield)
{
Message m = new Message();
m.type = Message.BITFIELD;
m.data = bitfield.getFieldBytes();
m.off = 0;
m.len = m.data.length;
addMessage(m);
boolean fast = peer.supportsFast();
boolean all = false;
boolean none = false;
byte[] data = null;
synchronized(bitfield) {
if (fast && bitfield.complete()) {
all = true;
} else if (fast && bitfield.count() <= 0) {
none = true;
} else {
byte[] d = bitfield.getFieldBytes();
data = Arrays.copyOf(d, d.length);
}
}
if (all) {
sendHaveAll();
} else if (none) {
sendHaveNone();
} else {
Message m = new Message(data);
addMessage(m);
}
}
/** retransmit requests not received in 7m */
private static final int REQ_TIMEOUT = (2 * SEND_TIMEOUT) + (60 * 1000);
void retransmitRequests(List<Request> requests)
{
long now = System.currentTimeMillis();
Iterator<Request> it = requests.iterator();
while (it.hasNext())
{
Request req = it.next();
if(now > req.sendTime + REQ_TIMEOUT) {
if (_log.shouldLog(Log.DEBUG))
_log.debug("Retransmit request " + req + " to peer " + peer);
sendRequest(req);
}
}
}
void sendRequests(List requests)
void sendRequests(List<Request> requests)
{
Iterator it = requests.iterator();
Iterator<Request> it = requests.iterator();
while (it.hasNext())
{
Request req = (Request)it.next();
Request req = it.next();
sendRequest(req);
}
}
void sendRequest(Request req)
{
Message m = new Message();
m.type = Message.REQUEST;
m.piece = req.piece;
m.begin = req.off;
m.length = req.len;
// Check for duplicate requests to deal with fibrillating i2p-bt
// (multiple choke/unchokes received cause duplicate requests in the queue)
synchronized(sendQueue)
{
Iterator<Message> it = sendQueue.iterator();
while (it.hasNext())
{
Message m = it.next();
if (m.type == Message.REQUEST && m.piece == req.getPiece() &&
m.begin == req.off && m.length == req.len)
{
if (_log.shouldLog(Log.DEBUG))
_log.debug("Discarding duplicate request " + req + " to peer " + peer);
return;
}
}
}
Message m = new Message(Message.REQUEST, req.getPiece(), req.off, req.len);
addMessage(m);
req.sendTime = System.currentTimeMillis();
}
/**
 * Total payload size in bytes of all PIECE messages currently queued.
 * Used by PeerState to limit pipelined requests.
 */
int queuedBytes()
{
    int bytes = 0;
    synchronized (sendQueue) {
        for (Message msg : sendQueue) {
            if (msg.type == Message.PIECE)
                bytes += msg.length;
        }
    }
    return bytes;
}
/**
 * Queue a piece message with a callback to load the data
 * from disk when required.
 * @since 0.8.2
 */
void sendPiece(int piece, int begin, int length, DataLoader loader)
{
    // Queue a placeholder: all header fields are set now, but the
    // DataLoader is saved in place of the bytes so the payload is
    // fetched from disk only when the message is actually sent.
    Message msg = new Message(piece, begin, length, loader);
    addMessage(msg);
}
/**
* Queue a piece message with the data already loaded from disk
* Also add a timeout.
* We don't use this anymore.
*/
/****
void sendPiece(int piece, int begin, int length, byte[] bytes)
{
Message m = new Message();
m.type = Message.PIECE;
m.piece = piece;
m.begin = begin;
m.length = length;
m.data = bytes;
m.off = begin;
m.len = length;
Message m = new Message(piece, begin, length, bytes);
// since we have the data already loaded, queue a timeout to remove it
// no longer prefetched
addMessage(m);
}
****/
/** send cancel */
void sendCancel(Request req)
{
// See if it is still in our send queue
synchronized(sendQueue)
{
Iterator it = sendQueue.iterator();
Iterator<Message> it = sendQueue.iterator();
while (it.hasNext())
{
Message m = (Message)it.next();
Message m = it.next();
if (m.type == Message.REQUEST
&& m.piece == req.piece
&& m.piece == req.getPiece()
&& m.begin == req.off
&& m.length == req.len)
it.remove();
......@@ -369,25 +532,38 @@ class PeerConnectionOut implements Runnable
}
// Always send, just to be sure it it is really canceled.
Message m = new Message();
m.type = Message.CANCEL;
m.piece = req.piece;
m.begin = req.off;
m.length = req.len;
Message m = new Message(Message.CANCEL, req.getPiece(), req.off, req.len);
addMessage(m);
}
// Called by the PeerState when the other side doesn't want this
// request to be handled anymore. Removes any pending Piece Message
// from out send queue.
/**
 * Remove every queued Request message.
 * Does not send a cancel message.
 * @since 0.8.2
 */
void cancelRequestMessages() {
    synchronized (sendQueue) {
        Iterator<Message> iter = sendQueue.iterator();
        while (iter.hasNext()) {
            if (iter.next().type == Message.REQUEST)
                iter.remove();
        }
    }
}
/**
* Called by the PeerState when the other side doesn't want this
* request to be handled anymore. Removes any pending Piece Message
* from out send queue.
* Does not send a cancel message.
*/
void cancelRequest(int piece, int begin, int length)
{
synchronized (sendQueue)
{
Iterator it = sendQueue.iterator();
Iterator<Message> it = sendQueue.iterator();
while (it.hasNext())
{
Message m = (Message)it.next();
Message m = it.next();
if (m.type == Message.PIECE
&& m.piece == piece
&& m.begin == begin
......@@ -396,4 +572,56 @@ class PeerConnectionOut implements Runnable
}
}
}
/**
 * Queue an extension message with the given extension id and payload.
 * @since 0.8.2
 */
void sendExtension(int id, byte[] bytes) {
    addMessage(new Message(id, bytes));
}

/**
 * Queue a PORT message advertising the given port.
 * @since 0.8.4
 */
void sendPort(int port) {
    addMessage(new Message(Message.PORT, port));
}
/**
* Unused
* @since 0.9.21
*/
/****
void sendSuggest(int piece) {
Message m = new Message(Message.SUGGEST, piece);
addMessage(m);
}
****/
/**
 * Queue a HAVE_ALL message (fast extension): we have every piece.
 * @since 0.9.21
 */
private void sendHaveAll() {
    addMessage(new Message(Message.HAVE_ALL));
}

/**
 * Queue a HAVE_NONE message (fast extension): we have no pieces.
 * @since 0.9.21
 */
private void sendHaveNone() {
    addMessage(new Message(Message.HAVE_NONE));
}

/**
 * Queue a REJECT message (fast extension) for the given request.
 * @since 0.9.21
 */
void sendReject(int piece, int begin, int length) {
    addMessage(new Message(Message.REJECT, piece, begin, length));
}
/**
* Unused
* @since 0.9.21
*/
/****
void sendAllowedFast(int piece) {
Message m = new Message(Message.ALLOWED_FAST, piece);
addMessage(m);
}
****/
}
......@@ -20,110 +20,335 @@
package org.klomp.snark;
import java.util.*;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import net.i2p.util.I2PThread;
import net.i2p.I2PAppContext;
import net.i2p.data.ByteArray;
import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
import net.i2p.util.ConcurrentHashSet;
import net.i2p.util.I2PAppThread;
import net.i2p.util.Log;
import net.i2p.util.RandomSource;
import net.i2p.util.SimpleTimer2;
import org.klomp.snark.bencode.BEValue;
import org.klomp.snark.bencode.InvalidBEncodingException;
import org.klomp.snark.comments.Comment;
import org.klomp.snark.comments.CommentSet;
import org.klomp.snark.dht.DHT;
/**
* Coordinates what peer does what.
*/
public class PeerCoordinator implements PeerListener
class PeerCoordinator implements PeerListener, BandwidthListener
{
private final Log _log = new Log(PeerCoordinator.class);
final MetaInfo metainfo;
final Storage storage;
final Snark snark;
private final Log _log;
/**
* External use by PeerMonitorTask only.
* Will be null when in magnet mode.
*/
MetaInfo metainfo;
/**
* External use by PeerMonitorTask only.
* Will be null when in magnet mode.
*/
Storage storage;
private final Snark snark;
// package local for access by CheckDownLoadersTask
final static long CHECK_PERIOD = 20*1000; // 20 seconds
final static int MAX_CONNECTIONS = 24;
final static int MAX_UPLOADERS = 12; // i2p: might as well balance it out
final static long CHECK_PERIOD = 30*1000;
final static int MAX_UPLOADERS = 8;
public static final long MAX_INACTIVE = 8*60*1000;
public static final long MAX_SEED_INACTIVE = 2*60*1000;
// Approximation of the number of current uploaders.
// Resynced by PeerChecker once in a while.
int uploaders = 0;
/**
* Approximation of the number of current uploaders (unchoked peers),
* whether interested or not.
* Resynced by PeerChecker once in a while.
*/
private final AtomicInteger uploaders = new AtomicInteger();
/**
* Approximation of the number of current uploaders (unchoked peers),
* that are interested.
* Resynced by PeerChecker once in a while.
*/
private final AtomicInteger interestedUploaders = new AtomicInteger();
/**
* External use by PeerCheckerTask only.
*/
private final AtomicInteger interestedAndChoking = new AtomicInteger();
// final static int MAX_DOWNLOADERS = MAX_CONNECTIONS;
// int downloaders = 0;
private long uploaded;
private long downloaded;
private final AtomicLong uploaded = new AtomicLong();
private final AtomicLong downloaded = new AtomicLong();
final static int RATE_DEPTH = 3; // make following arrays RATE_DEPTH long
private final long uploaded_old[] = {-1,-1,-1};
private final long downloaded_old[] = {-1,-1,-1};
/**
* synchronize on this when changing peers or downloaders.
* This is a Queue, not a Set, because PeerCheckerTask keeps things in order for choking/unchoking.
* External use by PeerMonitorTask only.
*/
final Deque<Peer> peers;
/**
* Peers we heard about via PEX
*/
private final Set<PeerID> pexPeers;
// synchronize on this when changing peers or downloaders
final List peers = new ArrayList();
/** estimate of the peers, without requiring any synchronization */
volatile int peerCount;
private volatile int peerCount;
/** Timer to handle all periodical tasks. */
private final Timer timer = new Timer(true);
private final CheckEvent timer;
// RerequestEvent and related values
private final SimpleTimer2.TimedEvent rerequestTimer;
private final Object rerequestLock = new Object();
private boolean wasRequestAllowed;
private boolean isRerequestScheduled;
private final byte[] id;
private final byte[] infohash;
// Some random wanted pieces
private final List wantedPieces;
/** The wanted pieces. We could use a TreeSet but we'd have to clear and re-add everything
* when priorities change.
*/
private final List<Piece> wantedPieces;
private boolean halted = false;
/** The total number of bytes in wantedPieces, or -1 if not yet known.
* Sync on wantedPieces.
* @since 0.9.1
*/
private long wantedBytes;
/** partial pieces - lock by synching on wantedPieces - TODO store Requests, not PartialPieces */
private final List<PartialPiece> partialPieces;
private volatile boolean halted;
private final MagnetState magnetState;
private final CoordinatorListener listener;
public String trackerProblems = null;
public int trackerSeenPeers = 0;
private final BandwidthListener bwListener;
private final I2PSnarkUtil _util;
private final RandomSource _random;
public PeerCoordinator(byte[] id, MetaInfo metainfo, Storage storage,
CoordinatorListener listener, Snark torrent)
private final AtomicLong _commentsLastRequested = new AtomicLong();
private final AtomicInteger _commentsNotRequested = new AtomicInteger();
private static final long COMMENT_REQ_INTERVAL = 12*60*60*1000L;
private static final long COMMENT_REQ_DELAY = 60*60*1000L;
private static final int MAX_COMMENT_NOT_REQ = 10;
/** hostname to expire time, sync on this */
private Map<String, Long> _webPeerBans;
private static final long WEBPEER_BAN_TIME = 30*60*1000L;
/**
* @param metainfo null if in magnet mode
* @param storage null if in magnet mode
*/
public PeerCoordinator(I2PSnarkUtil util, byte[] id, byte[] infohash, MetaInfo metainfo, Storage storage,
CoordinatorListener listener, Snark torrent, BandwidthListener bwl)
{
_util = util;
_random = util.getContext().random();
_log = util.getContext().logManager().getLog(PeerCoordinator.class);
this.id = id;
this.infohash = infohash;
this.metainfo = metainfo;
this.storage = storage;
this.listener = listener;
this.snark = torrent;
bwListener = bwl;
// Make a list of pieces
wantedPieces = new ArrayList();
BitField bitfield = storage.getBitField();
for(int i = 0; i < metainfo.getPieces(); i++)
if (!bitfield.get(i))
wantedPieces.add(new Piece(i));
Collections.shuffle(wantedPieces);
wantedPieces = new ArrayList<Piece>();
setWantedPieces();
partialPieces = new ArrayList<PartialPiece>(getMaxConnections() + 1);
peers = new LinkedBlockingDeque<Peer>();
magnetState = new MagnetState(infohash, metainfo);
pexPeers = new ConcurrentHashSet<PeerID>();
// Install a timer to check the uploaders.
timer.schedule(new PeerCheckerTask(this), CHECK_PERIOD, CHECK_PERIOD);
// Randomize the first start time so multiple tasks are spread out,
// this will help the behavior with global limits
timer = new CheckEvent(_util.getContext(), new PeerCheckerTask(_util, this));
timer.schedule((CHECK_PERIOD / 2) + _random.nextInt((int) CHECK_PERIOD));
// NOT scheduled until needed
rerequestTimer = new RerequestEvent();
// we don't store the last-requested time, so just delay a random amount
_commentsLastRequested.set(util.getContext().clock().now() - (COMMENT_REQ_INTERVAL - _random.nextLong(COMMENT_REQ_DELAY)));
}
/**
 * Run the PeerCheckerTask via the SimpleTimer2 executors
 * @since 0.8.2
 */
private static class CheckEvent extends SimpleTimer2.TimedEvent {
// the task to run on every firing
private final PeerCheckerTask _task;
/** caller must schedule the first firing */
public CheckEvent(I2PAppContext ctx, PeerCheckerTask task) {
super(ctx.simpleTimer2());
_task = task;
}
public void timeReached() {
// run synchronously in the timer thread, then re-arm;
// self-rescheduling prevents overlapping runs of the same task
_task.run();
schedule(CHECK_PERIOD);
}
}
/**
 * Rerequest after unthrottled.
 * Fired (via shouldRequest()) when the bandwidth limiter state may have
 * changed; pokes all peers to request again once we are no longer throttled.
 * @since 0.9.62
 */
private class RerequestEvent extends SimpleTimer2.TimedEvent {
/** caller must schedule */
public RerequestEvent() {
super(_util.getContext().simpleTimer2());
}
public void timeReached() {
// probe the limiter with a zero-size request
if (bwListener.shouldRequest(null, 0)) {
if (_log.shouldWarn())
_log.warn("Now unthrottled, rerequest timer poking all peers");
// so shouldRequest() won't fire us up again
synchronized(rerequestLock) {
wasRequestAllowed = true;
}
// poke every unchoked, interesting peer to resume requesting
for (Peer p : peers) {
if (p.isInteresting() && !p.isChoked())
p.request();
}
// only clear the scheduled flag AFTER poking, so a concurrent
// shouldRequest() cannot schedule a redundant firing mid-loop
synchronized(rerequestLock) {
isRerequestScheduled = false;
}
} else {
if (_log.shouldWarn())
_log.warn("Still throttled, rerequest timer reschedule");
synchronized(rerequestLock) {
wasRequestAllowed = false;
}
// still throttled; poll again shortly (flag stays set)
schedule(2*1000);
}
}
}
/**
 * Rebuild the list of wanted pieces and recompute wantedBytes.
 * Only called externally from Storage after the double-check fails.
 */
public void setWantedPieces()
{
    if (metainfo == null || storage == null) {
        // magnet mode: total wanted is unknown
        wantedBytes = -1;
        return;
    }
    synchronized (wantedPieces) {
        wantedPieces.clear();
        BitField have = storage.getBitField();
        int[] priorities = storage.getPiecePriorities();
        long bytes = 0;
        for (int id = 0; id < metainfo.getPieces(); id++) {
            // skip pieces we already have
            if (have.get(id))
                continue;
            // skip deprioritized (skipped) pieces
            if (priorities != null && priorities[id] < 0)
                continue;
            Piece piece = new Piece(id);
            if (priorities != null)
                piece.setPriority(priorities[id]);
            wantedPieces.add(piece);
            bytes += metainfo.getPieceLength(id);
        }
        wantedBytes = bytes;
        // randomize so peers don't all fetch in the same order
        Collections.shuffle(wantedPieces, _random);
    }
}
/** @return the Storage; null when in magnet mode */
public Storage getStorage() { return storage; }
/** @return the CoordinatorListener passed to the constructor */
public CoordinatorListener getListener() { return listener; }
/** for web page detailed stats */
public List<Peer> peerList()
{
// snapshot copy, safe to iterate without holding any lock
return new ArrayList<Peer>(peers);
}
/**
 * Our peer ID.
 * NOTE(review): returns the internal array without copying; callers must not modify it.
 */
public byte[] getID()
{
return id;
}
/** @return the torrent name, as reported by the Snark */
public String getName()
{
return snark.getName();
}
/** @return true when storage reports all pieces complete; always false in magnet mode */
public boolean completed()
{
// FIXME return metainfo complete status
if (storage == null)
return false;
return storage.complete();
}
/** might be wrong */
public int getPeerCount() { return peerCount; }
/** should be right */
public int getPeers()
{
synchronized(peers)
{
// resync the unsynchronized estimate while we hold the lock
int rv = peers.size();
peerCount = rv;
return rv;
}
}
/**
* Returns how many bytes are still needed to get the complete file.
* Bytes not yet in storage. Does NOT account for skipped files.
* Returns how many bytes are still needed to get the complete torrent.
* @return -1 if in magnet mode
*/
public long getLeft()
{
// XXX - Only an approximation.
return storage.needed() * metainfo.getPieceLength(0);
if (metainfo == null | storage == null)
return -1;
int psz = metainfo.getPieceLength(0);
long rv = ((long) storage.needed()) * psz;
int last = metainfo.getPieces() - 1;
BitField bf = storage.getBitField();
if (bf != null && !bf.get(last))
rv -= psz - metainfo.getPieceLength(last);
return rv;
}
/**
 * Bytes still wanted. DOES account for skipped files.
 * Updated under the wantedPieces lock by setWantedPieces(); read here
 * without synchronization (plain long field).
 * @return exact value. or -1 if no storage yet.
 * @since 0.9.1
 */
public long getNeededLength() {
return wantedBytes;
}
/**
......@@ -131,7 +356,15 @@ public class PeerCoordinator implements PeerListener
*/
public long getUploaded()
{
return uploaded;
return uploaded.get();
}
/**
 * Sets the initial total of uploaded bytes of all peers (from a saved status)
 * Overwrites, rather than adds to, the current total.
 * @since 0.9.15
 */
public void setUploaded(long up) {
uploaded.set(up);
}
/**
......@@ -139,28 +372,268 @@ public class PeerCoordinator implements PeerListener
*/
public long getDownloaded()
{
return downloaded;
return downloaded.get();
}
/////// begin BandwidthListener interface ///////
/**
* Called when a peer has uploaded some bytes of a piece.
* Adds to our running total and forwards to the global listener.
* @since 0.9.62 params changed
*/
public void uploaded(int size) {
uploaded.addAndGet(size);
bwListener.uploaded(size);
}
/**
* Called when a peer has downloaded some bytes of a piece.
* Adds to our running total and forwards to the global listener.
* @since 0.9.62 params changed
*/
public void downloaded(int size) {
downloaded.addAndGet(size);
bwListener.downloaded(size);
}
/**
* Should we send this many bytes?
* Do NOT call uploaded() if this returns true.
* (the bytes are counted here on success, to avoid double-counting)
* @since 0.9.62
*/
public boolean shouldSend(int size) {
boolean rv = bwListener.shouldSend(size);
if (rv)
uploaded.addAndGet(size);
return rv;
}
/**
* Should we request this many bytes?
* Also tracks throttled/unthrottled transitions and (re)schedules the
* rerequest timer so stalled peers get poked when bandwidth frees up.
* All transition state is guarded by rerequestLock.
* @since 0.9.62
*/
public boolean shouldRequest(Peer peer, int size) {
boolean rv;
synchronized(rerequestLock) {
rv = bwListener.shouldRequest(peer, size);
if (!wasRequestAllowed && rv) {
// we weren't allowed and now we are
if (isRerequestScheduled) {
// just let the timer run when scheduled, do not pull it in
// to prevent thrashing
//if (_log.shouldWarn())
// _log.warn("Now unthrottled, BUT DON'T reschedule rerequest timer");
} else {
// schedule the timer
// we still have to throw it to the timer so we don't loop
if (_log.shouldWarn())
_log.warn("Now unthrottled, schedule rerequest timer");
isRerequestScheduled = true;
// no rush, wait at little while
rerequestTimer.reschedule(1000);
}
wasRequestAllowed = true;
} else if (wasRequestAllowed && !rv) {
// we were allowed and now we aren't
if (!isRerequestScheduled) {
// schedule the timer
if (_log.shouldWarn())
_log.warn("Now throttled, schedule rerequest timer");
isRerequestScheduled = true;
rerequestTimer.schedule(3*1000);
}
wasRequestAllowed = false;
}
}
return rv;
}
/**
* Push the total uploaded/downloaded onto a RATE_DEPTH deep stack
* (one sample per CHECK_PERIOD; newest sample at index 0)
*/
public void setRateHistory(long up, long down)
{
setRate(up, uploaded_old);
setRate(down, downloaded_old);
}
/**
* Shift the history array down one slot and store the new sample at [0].
* Synchronized on the array itself, matching the readers in getRate().
*/
static void setRate(long val, long array[])
{
synchronized(array) {
for (int i = RATE_DEPTH-1; i > 0; i--)
array[i] = array[i-1];
array[0] = val;
}
}
/**
* Returns the average rate in Bps
* over last RATE_DEPTH * CHECK_PERIOD seconds
*/
public long getDownloadRate()
{
if (halted)
return 0;
return getRate(downloaded_old);
}
/**
* Returns the average rate in Bps
* over last RATE_DEPTH * CHECK_PERIOD seconds
*/
public long getUploadRate()
{
if (halted)
return 0;
return getRate(uploaded_old);
}
/**
* Returns the rate in Bps
* over last complete CHECK_PERIOD seconds
*/
public long getCurrentUploadRate()
{
if (halted)
return 0;
// no need to synchronize, only one value
long r = uploaded_old[0];
if (r <= 0)
return 0;
return (r * 1000) / CHECK_PERIOD;
}
/**
* Weighted average over the filled history slots: newer samples
* (lower indexes) get a higher weight. A value of -1 marks a
* not-yet-filled slot and terminates the scan.
* @return 0 if no samples yet
*/
static long getRate(long array[])
{
long rate = 0;
int i = 0;
int factor = 0;
synchronized(array) {
for ( ; i < RATE_DEPTH; i++) {
if (array[i] < 0)
break;
int f = RATE_DEPTH - i;
rate += array[i] * f;
factor += f;
}
}
// no samples recorded yet
if (i == 0)
return 0;
return rate / (factor * CHECK_PERIOD / 1000);
}
/**
* Current limit in Bps
* (delegated to the global bandwidth listener)
* @since 0.9.62
*/
public long getUpBWLimit() {
return bwListener.getUpBWLimit();
}
/**
* Is snark as a whole over its limit?
*/
public boolean overUpBWLimit()
{
return bwListener.overUpBWLimit();
}
/**
* Is a particular peer who has downloaded this many bytes from us
* in the last CHECK_PERIOD over its limit?
*/
public boolean overUpBWLimit(long total)
{
return total * 1000 / CHECK_PERIOD > getUpBWLimit();
}
/**
* Current limit in Bps
* (delegated to the global bandwidth listener)
* @since 0.9.62
*/
public long getDownBWLimit() {
return bwListener.getDownBWLimit();
}
/**
* Are we currently over the limit?
* @since 0.9.62
*/
public boolean overDownBWLimit() {
return bwListener.overDownBWLimit();
}
/////// end BandwidthListener interface ///////
/** @return null when in magnet mode */
public MetaInfo getMetaInfo()
{
return metainfo;
}
/**
* NOTE(review): returns the internal array without copying; callers must not modify it.
* @since 0.8.4
*/
public byte[] getInfoHash()
{
return infohash;
}
/**
* Inbound.
* Not halted, peers &lt; max.
* @since 0.9.1
*/
public boolean needPeers()
{
synchronized(peers)
{
return !halted && peers.size() < MAX_CONNECTIONS;
}
return !halted && peers.size() < getMaxConnections();
}
/**
* Outbound.
* Not halted, peers &lt; max, and need pieces.
* Also true while seeding if comments are enabled and we haven't
* requested comments within COMMENT_REQ_INTERVAL.
* @since 0.9.1
*/
public boolean needOutboundPeers() {
//return wantedBytes != 0 && needPeers();
// minus two to make it a little easier for new peers to get in on large swarms
return (wantedBytes != 0 ||
(_util.utCommentsEnabled() &&
// we should also check SnarkManager.getSavedCommentsEnabled() for this torrent,
// but that reads in the config file, there's no caching.
// TODO
_commentsLastRequested.get() < _util.getContext().clock().now() - COMMENT_REQ_INTERVAL)) &&
!halted &&
peers.size() < getMaxConnections() - 2 &&
(storage == null || !storage.isChecking());
}
/**
* Formerly used to
* reduce max if huge pieces to keep from ooming when leeching
* but now we don't
* Caps the configured max for small torrents (few pieces) and
* scales it down when the download bandwidth limit is low.
* @return usually I2PSnarkUtil.MAX_CONNECTIONS
*/
private int getMaxConnections() {
// magnet mode: no metainfo yet, keep the swarm small
if (metainfo == null)
return 6;
int pieces = metainfo.getPieces();
int max = _util.getMaxConnections();
if (pieces <= 10) {
if (max > 4) max = 4;
} else if (pieces <= 25) {
if (max > 10) max = 10;
} else if (pieces <= 80) {
if (max > 16) max = 16;
}
long bwl = getDownBWLimit();
// below 32KBps, scale max down proportionally (floor of 6)
if (bwl < 32*1024)
max = Math.min(max, Math.max(6, (int) (I2PSnarkUtil.MAX_CONNECTIONS * bwl / (32*1024))));
return max;
}
/** @return true once halt() has been called and before restart() */
public boolean halted() { return halted; }
public void halt()
{
halted = true;
List removed = new ArrayList();
List<Peer> removed = new ArrayList<Peer>();
synchronized(peers)
{
// Stop peer checker task.
......@@ -172,11 +645,38 @@ public class PeerCoordinator implements PeerListener
peerCount = 0;
}
while (removed.size() > 0) {
Peer peer = (Peer)removed.remove(0);
while (!removed.isEmpty()) {
Peer peer = removed.remove(0);
peer.disconnect();
removePeerFromPieces(peer);
}
// delete any saved orphan partial piece
synchronized (partialPieces) {
for (PartialPiece pp : partialPieces) {
pp.release();
}
partialPieces.clear();
}
}
/**
* Restart after a halt(): clear the halted flag, zero the rate history,
* reset per-piece state, and re-arm the checker timer with a randomized
* initial delay (spreads out multiple torrents' tasks).
* @since 0.9.1
*/
public void restart() {
halted = false;
synchronized (uploaded_old) {
Arrays.fill(uploaded_old, 0);
}
synchronized (downloaded_old) {
Arrays.fill(downloaded_old, 0);
}
// failsafe
synchronized(wantedPieces) {
for (Piece pc : wantedPieces) {
pc.clear();
}
}
timer.schedule((CHECK_PERIOD / 2) + _random.nextInt((int) CHECK_PERIOD));
}
public void connected(Peer peer)
......@@ -191,8 +691,10 @@ public class PeerCoordinator implements PeerListener
synchronized(peers)
{
Peer old = peerIDInList(peer.getPeerID(), peers);
if ( (old != null) && (old.getInactiveTime() > 2*60*1000) ) {
// idle for 2 minutes, kill the old con
if (old != null && old.getInactiveTime() > old.getMaxInactiveTime()) {
// idle for 8 minutes, kill the old con (32KB/8min = 68B/sec minimum for one block)
if (_log.shouldLog(Log.WARN))
_log.warn("Remomving old peer: " + peer + ": " + old + ", inactive for " + old.getInactiveTime());
peers.remove(old);
toDisconnect = old;
old = null;
......@@ -201,21 +703,47 @@ public class PeerCoordinator implements PeerListener
{
if (_log.shouldLog(Log.WARN))
_log.warn("Already connected to: " + peer + ": " + old + ", inactive for " + old.getInactiveTime());
// toDisconnect = peer to get out of synchronized(peers)
peer.disconnect(false); // Don't deregister this connection/peer.
}
// This is already checked in addPeer() but we could have gone over the limit since then
else if (peers.size() >= getMaxConnections())
{
if (_log.shouldLog(Log.WARN))
_log.warn("Already at MAX_CONNECTIONS in connected() with peer: " + peer);
// toDisconnect = peer to get out of synchronized(peers)
peer.disconnect(false);
}
else
{
if (_log.shouldLog(Log.INFO))
_log.info("New connection to peer: " + peer + " for " + metainfo.getName());
if (_log.shouldLog(Log.INFO)) {
// just for logging
String name;
if (metainfo == null)
name = "Magnet";
else
name = metainfo.getName();
_log.info("New connection to peer: " + peer + " for " + name);
}
// We may have gotten the metainfo after the peer was created.
if (metainfo != null)
peer.setMetaInfo(metainfo);
// Add it to the beginning of the list.
// And try to optimistically make it a uploader.
peers.add(0, peer);
// Can't add to beginning since we converted from a List to a Queue
// We can do this in Java 6 with a Deque
//peers.add(0, peer);
if (_util.getContext().random().nextInt(4) == 0)
peers.push(peer);
else
peers.add(peer);
peerCount = peers.size();
unchokePeer();
if (listener != null)
listener.peerChange(this, peer);
//if (listener != null)
// listener.peerChange(this, peer);
}
}
if (toDisconnect != null) {
......@@ -224,115 +752,159 @@ public class PeerCoordinator implements PeerListener
}
}
private static Peer peerIDInList(PeerID pid, List peers)
/**
* @return peer if peer id is in the collection, else null
*/
private static Peer peerIDInList(PeerID pid, Collection<Peer> peers)
{
Iterator it = peers.iterator();
Iterator<Peer> it = peers.iterator();
while (it.hasNext()) {
Peer cur = (Peer)it.next();
Peer cur = it.next();
if (pid.sameID(cur.getPeerID()))
return cur;
}
return null;
}
public void addPeer(final Peer peer)
/**
* Add peer (inbound or outbound)
* @return true if actual attempt to add peer occurs
*/
public boolean addPeer(final Peer peer)
{
if (halted)
{
peer.disconnect(false);
return;
return false;
}
boolean need_more;
int peersize = 0;
synchronized(peers)
{
need_more = !peer.isConnected() && peers.size() < MAX_CONNECTIONS;
peersize = peers.size();
// This isn't a strict limit, as we may have several pending connections;
// thus there is an additional check in connected()
need_more = (!peer.isConnected()) && peersize < getMaxConnections();
// Check if we already have this peer before we build the connection
if (need_more) {
Peer old = peerIDInList(peer.getPeerID(), peers);
need_more = old == null || old.getInactiveTime() > old.getMaxInactiveTime();
}
}
if (need_more)
{
_log.debug("Adding a peer " + peer.getPeerID().getAddress().calculateHash().toBase64() + " for " + metainfo.getName(), new Exception("add/run"));
if (_log.shouldLog(Log.DEBUG)) {
// just for logging
String name;
if (metainfo == null)
name = "Magnet";
else
name = metainfo.getName();
_log.debug("Adding a peer " + peer.getPeerID().toString() + " for " + name, new Exception("add/run"));
}
// Run the peer with us as listener and the current bitfield.
final PeerListener listener = this;
final BitField bitfield = storage.getBitField();
final BitField bitfield;
if (storage != null)
bitfield = storage.getBitField();
else
bitfield = null;
if (!peer.isIncoming() && wantedBytes == 0 && _log.shouldInfo())
_log.info("Outbound connection as seed to get comments for " + snark.getBaseName() + " to " + peer);
// if we aren't a seed but we don't want any more
final boolean partialComplete = wantedBytes == 0 && bitfield != null && !bitfield.complete();
Runnable r = new Runnable()
{
public void run()
{
peer.runConnection(listener, bitfield);
peer.runConnection(_util, listener, PeerCoordinator.this, bitfield, magnetState, partialComplete);
}
};
String threadName = peer.toString();
new I2PThread(r, threadName).start();
}
else
if (_log.shouldLog(Log.DEBUG)) {
if (peer.isConnected())
_log.info("Add peer already connected: " + peer);
else
_log.info("MAX_CONNECTIONS = " + MAX_CONNECTIONS
+ " not accepting extra peer: " + peer);
String threadName = "Snark peer " + peer.toString();
new I2PAppThread(r, threadName).start();
return true;
}
if (_log.shouldLog(Log.DEBUG)) {
if (peer.isConnected())
_log.debug("Add peer already connected: " + peer);
else
_log.debug("Connections: " + peersize + "/" + getMaxConnections()
+ " not accepting extra peer: " + peer);
}
return false;
}
// (Optimistically) unchoke. Should be called with peers synchronized
/**
* (Optimistically) unchoke. Must be called with peers synchronized
*/
void unchokePeer()
{
if (storage == null || storage.getBitField().size() == 0)
return;
if (overUpBWLimit())
return;
// linked list will contain all interested peers that we choke.
// At the start are the peers that have us unchoked at the end the
// other peer that are interested, but are choking us.
List interested = new LinkedList();
synchronized (peers) {
Iterator it = peers.iterator();
List<Peer> interested = new LinkedList<Peer>();
int count = 0;
int unchokedCount = 0;
int maxUploaders = allowedUploaders();
Iterator<Peer> it = peers.iterator();
while (it.hasNext())
{
Peer peer = (Peer)it.next();
boolean remove = false;
if (uploaders < MAX_UPLOADERS
&& peer.isChoking()
&& peer.isInterested())
Peer peer = it.next();
if (peer.isChoking() && peer.isInterested())
{
if (!peer.isChoked())
interested.add(0, peer);
else
interested.add(peer);
count++;
if (uploaders.get() < maxUploaders)
{
if (peer.isInteresting() && !peer.isChoked())
interested.add(unchokedCount++, peer);
else
interested.add(peer);
}
}
}
while (uploaders < MAX_UPLOADERS && interested.size() > 0)
int up = uploaders.get();
while (up < maxUploaders && !interested.isEmpty())
{
Peer peer = (Peer)interested.remove(0);
Peer peer = interested.remove(0);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Unchoke: " + peer);
peer.setChoking(false);
uploaders++;
up = uploaders.incrementAndGet();
interestedUploaders.incrementAndGet();
count--;
// Put peer back at the end of the list.
peers.remove(peer);
peers.add(peer);
peerCount = peers.size();
}
}
}
public byte[] getBitMap()
{
return storage.getBitField().getFieldBytes();
interestedAndChoking.set(count);
}
/**
* Returns true if we don't have the given piece yet.
* @return true if we still want the given piece
*/
public boolean gotHave(Peer peer, int piece)
{
if (listener != null)
listener.peerChange(this, peer);
//if (listener != null)
// listener.peerChange(this, peer);
synchronized(wantedPieces)
{
return wantedPieces.contains(new Piece(piece));
}
synchronized(wantedPieces) {
for (Piece pc : wantedPieces) {
if (pc.getId() == piece) {
pc.addPeer(peer);
return true;
}
}
return false;
}
}
/**
......@@ -341,135 +913,269 @@ public class PeerCoordinator implements PeerListener
*/
public boolean gotBitField(Peer peer, BitField bitfield)
{
if (listener != null)
listener.peerChange(this, peer);
//if (listener != null)
// listener.peerChange(this, peer);
synchronized(wantedPieces)
{
Iterator it = wantedPieces.iterator();
while (it.hasNext())
{
Piece p = (Piece)it.next();
boolean rv = false;
synchronized(wantedPieces) {
for (Piece p : wantedPieces) {
int i = p.getId();
if (bitfield.get(i))
if (bitfield.get(i)) {
p.addPeer(peer);
return true;
}
}
return false;
rv = true;
}
}
}
return rv;
}
/**
* This should be somewhat less than the max conns per torrent,
* but not too much less, so a torrent doesn't get stuck near the end.
* @since 0.7.14
*/
private static final int END_GAME_THRESHOLD = 8;
/**
* Max number of peers to get a piece from when in end game
* @since 0.8.1
*/
private static final int MAX_PARALLEL_REQUESTS = 4;
// NOTE(review): merged diff rendering — two signatures (old public int,
// new private Piece returning, with a `record` flag), old and new loop bodies,
// and old/new return statements are all interleaved. Will not compile as-is;
// resolve to one revision before building.
// New-revision behavior (as far as visible here): sort wanted pieces
// rarest-first, pick the first enabled, unrequested piece the peer has that is
// not already a saved partial; otherwise, only when END_GAME_THRESHOLD or fewer
// pieces remain, re-request an already-requested piece subject to
// MAX_PARALLEL_REQUESTS and not-already-requested-by-this-peer.
/**
* Returns one of pieces in the given BitField that is still wanted or
* -1 if none of the given pieces are wanted.
* null if none of the given pieces are wanted.
*
* @param record if true, actually record in our data structures that we gave the
* request to this peer. If false, do not update the data structures.
* @since 0.8.2
*/
public int wantPiece(Peer peer, BitField havePieces)
{
private Piece wantPiece(Peer peer, BitField havePieces, boolean record) {
if (halted) {
if (_log.shouldLog(Log.WARN))
_log.warn("We don't want anything from the peer, as we are halted! peer=" + peer);
return -1;
return null;
}
Piece piece = null;
List<Piece> requested = new ArrayList<Piece>();
int wantedSize = END_GAME_THRESHOLD + 1;
synchronized(wantedPieces)
{
Piece piece = null;
Collections.sort(wantedPieces); // Sort in order of rarest first.
List requested = new ArrayList();
Iterator it = wantedPieces.iterator();
if (record)
Collections.sort(wantedPieces); // Sort in order of rarest first.
Iterator<Piece> it = wantedPieces.iterator();
while (piece == null && it.hasNext())
{
Piece p = (Piece)it.next();
Piece p = it.next();
// sorted by priority, so when we hit a disabled piece we are done
if (p.isDisabled())
break;
if (havePieces.get(p.getId()) && !p.isRequested())
{
piece = p;
// never ever choose one that's in partialPieces, or we
// will create a second one and leak
boolean hasPartial = false;
for (PartialPiece pp : partialPieces) {
if (pp.getPiece() == p.getId()) {
if (_log.shouldLog(Log.INFO))
_log.info("wantPiece() skipping partial for " + peer + ": piece = " + pp);
hasPartial = true;
break;
}
}
if (!hasPartial)
piece = p;
}
else if (p.isRequested())
{
requested.add(p);
}
}
if (piece == null)
wantedSize = wantedPieces.size();
//Only request a piece we've requested before if there's no other choice.
if (piece == null) {
Iterator it2 = requested.iterator();
// AND if there are almost no wanted pieces left (real end game).
// If we do end game all the time, we generate lots of extra traffic
// when the seeder is super-slow and all the peers are "caught up"
if (wantedSize > END_GAME_THRESHOLD) {
if (_log.shouldLog(Log.INFO))
_log.info("Nothing to request, " + requested.size() + " being requested and " +
wantedSize + " still wanted");
return null; // nothing to request and not in end game
}
// let's not all get on the same piece
// Even better would be to sort by number of requests
if (record)
Collections.shuffle(requested, _random);
Iterator<Piece> it2 = requested.iterator();
while (piece == null && it2.hasNext())
{
Piece p = (Piece)it2.next();
if (havePieces.get(p.getId()))
{
piece = p;
}
Piece p = it2.next();
if (havePieces.get(p.getId())) {
// limit number of parallel requests
int requestedCount = p.getRequestCount();
if (requestedCount < MAX_PARALLEL_REQUESTS &&
!p.isRequestedBy(peer)) {
piece = p;
break;
}
}
}
if (piece == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("nothing to even rerequest from " + peer + ": requested = " + requested
+ " wanted = " + wantedPieces + " peerHas = " + havePieces);
return -1; //If we still can't find a piece we want, so be it.
_log.warn("nothing to even rerequest from " + peer + ": requested = " + requested);
// _log.warn("nothing to even rerequest from " + peer + ": requested = " + requested
// + " wanted = " + wantedPieces + " peerHas = " + havePieces);
return null; //If we still can't find a piece we want, so be it.
} else {
// Should be a lot smarter here -
// share blocks rather than starting from 0 with each peer.
// This is where the flaws of the snark data model are really exposed.
// Could also randomize within the duplicate set rather than strict rarest-first
if (_log.shouldLog(Log.INFO))
_log.info("parallel request (end game?) for " + peer + ": piece = " + piece);
}
}
piece.setRequested(true);
return piece.getId();
if (record) {
if (_log.shouldLog(Log.INFO))
_log.info("Now requesting from " + peer + ": piece " + piece + " priority " + piece.getPriority() +
" peers " + piece.getPeerCount() + '/' + peers.size());
piece.setRequested(peer, true);
}
return piece;
} // synch
}
/**
* Maps file priorities to piece priorities.
* Call after updating file priorities Storage.setPriority()
*
* Adds newly-wanted incomplete pieces to wantedPieces (populating their
* peer lists from currently-connected peers' bitfields), applies the new
* priority to every remaining wanted piece, and removes newly-unwanted
* pieces. Cancels and re-requests are deliberately done AFTER releasing
* the wantedPieces lock (see comments below) to avoid deadlocks.
* @since 0.8.1
*/
public void updatePiecePriorities() {
if (storage == null)
return;
int[] pri = storage.getPiecePriorities();
if (pri == null) {
_log.debug("Updated piece priorities called but no priorities to set?");
return;
}
List<Piece> toCancel = new ArrayList<Piece>();
synchronized(wantedPieces) {
// Add incomplete and previously unwanted pieces to the list
// Temp to avoid O(n**2)
BitField want = new BitField(pri.length);
for (Piece p : wantedPieces) {
want.set(p.getId());
}
BitField bitfield = storage.getBitField();
for (int i = 0; i < pri.length; i++) {
// pri[i] >= 0 means wanted; skip pieces we already have (bitfield)
if (pri[i] >= 0 && !bitfield.get(i)) {
if (!want.get(i)) {
Piece piece = new Piece(i);
wantedPieces.add(piece);
wantedBytes += metainfo.getPieceLength(i);
// As connections are already up, new Pieces will
// not have their PeerID list populated, so do that.
for (Peer p : peers) {
// TODO don't access state directly
PeerState s = p.state;
if (s != null) {
BitField bf = s.bitfield;
if (bf != null && bf.get(i))
piece.addPeer(p);
}
}
}
}
}
// now set the new priorities and remove newly unwanted pieces
for (Iterator<Piece> iter = wantedPieces.iterator(); iter.hasNext(); ) {
Piece p = iter.next();
int priority = pri[p.getId()];
if (priority >= 0) {
p.setPriority(priority);
} else {
iter.remove();
toCancel.add(p);
wantedBytes -= metainfo.getPieceLength(p.getId());
}
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Updated piece priorities, now wanted: " + wantedPieces);
// if we added pieces, they will be in-order unless we shuffle
Collections.shuffle(wantedPieces, _random);
}
// cancel outside of wantedPieces lock to avoid deadlocks
if (!toCancel.isEmpty()) {
// cancel all peers
for (Peer peer : peers) {
for (Piece p : toCancel) {
peer.cancel(p.getId());
}
}
}
// ditto, avoid deadlocks
// update request queues, in case we added wanted pieces
// and we were previously uninterested
for (Peer peer : peers) {
peer.request();
}
}
// NOTE(review): merged diff rendering — two signatures (old whole-piece
// byte[] form, new ByteArray form with offset/length) and both catch bodies
// are present. Will not compile as-is.
/**
* Returns a byte array containing the requested piece or null of
* the piece is unknown.
*
* @return bytes or null for errors such as not having the piece yet
* @throws RuntimeException on IOE getting the data
*/
public byte[] gotRequest(Peer peer, int piece)
public ByteArray gotRequest(Peer peer, int piece, int off, int len)
{
if (halted)
return null;
if (metainfo == null || storage == null)
return null;
try
{
return storage.getPiece(piece);
return storage.getPiece(piece, off, len);
}
catch (IOException ioe)
{
// old revision stopped the torrent here; new revision reports the
// error to the listener and lets the RuntimeException propagate
snark.stopTorrent();
_log.error("Error reading the storage for " + metainfo.getName(), ioe);
throw new RuntimeException("B0rked");
String msg = "Error reading the storage (piece " + piece + ") for " + metainfo.getName() + ": " + ioe;
_log.error(msg, ioe);
if (listener != null) {
listener.addMessage(msg);
listener.addMessage("Fatal storage error: Stopping torrent " + metainfo.getName());
}
throw new RuntimeException(msg, ioe);
}
}
/**
 * PeerListener callback: we uploaded some bytes of a piece to a peer.
 * Adds the byte count to the torrent-wide upload total and notifies the
 * listener (if any) that this peer's stats changed.
 *
 * @param peer the peer the data was sent to
 * @param size number of bytes uploaded
 */
public void uploaded(Peer peer, int size)
{
    this.uploaded += size;
    if (this.listener != null)
        this.listener.peerChange(this, peer);
}
/**
 * PeerListener callback: we downloaded some bytes of a piece from a peer.
 * Adds the byte count to the torrent-wide download total and notifies the
 * listener (if any) that this peer's stats changed.
 *
 * @param peer the peer the data came from
 * @param size number of bytes downloaded
 */
public void downloaded(Peer peer, int size)
{
    this.downloaded += size;
    if (this.listener != null)
        this.listener.peerChange(this, peer);
}
// NOTE(review): merged diff rendering — old byte[]-based and new
// PartialPiece-based revisions are interleaved (two signatures, two
// synchronized(wantedPieces) entries, two catch bodies, two peer-announce
// loops). Will not compile as-is; resolve to one revision before building.
// New-revision behavior (as visible): validate we still want the piece,
// store it via storage.putPiece(pp) with the IOException handler OUTSIDE the
// wantedPieces lock (the old in-lock stopTorrent() deadlocked), un-mark peers
// on a bad hash, then announce HAVE to all peers — disconnecting other seeders
// and releasing all partials once the last wanted byte arrives.
/**
* Returns false if the piece is no good (according to the hash).
* In that case the peer that supplied the piece should probably be
* blacklisted.
*
* @throws RuntimeException on IOE saving the piece
*/
public boolean gotPiece(Peer peer, int piece, byte[] bs)
public boolean gotPiece(Peer peer, PartialPiece pp)
{
if (halted) {
_log.info("Got while-halted piece " + piece + "/" + metainfo.getPieces() +" from " + peer + " for " + metainfo.getName());
return true; // We don't actually care anymore.
if (metainfo == null || storage == null || storage.isChecking() || halted) {
pp.release();
return true;
}
int piece = pp.getPiece();
synchronized(wantedPieces)
{
// try/catch outside the synch to avoid deadlock in the catch
try {
synchronized(wantedPieces) {
Piece p = new Piece(piece);
if (!wantedPieces.contains(p))
{
......@@ -477,77 +1183,128 @@ public class PeerCoordinator implements PeerListener
// No need to announce have piece to peers.
// Assume we got a good piece, we don't really care anymore.
return true;
// Well, this could be caused by a change in priorities, so
// only return true if we already have it, otherwise might as well keep it.
if (storage.getBitField().get(piece)) {
pp.release();
return true;
}
}
try
{
if (storage.putPiece(piece, bs))
// try/catch moved outside of synch
// this takes forever if complete, as it rechecks
if (storage.putPiece(pp))
{
_log.info("Got valid piece " + piece + "/" + metainfo.getPieces() +" from " + peer + " for " + metainfo.getName());
if (_log.shouldLog(Log.INFO))
_log.info("Got valid piece " + piece + "/" + metainfo.getPieces() +" from " + peer + " for " + metainfo.getName());
}
else
{
// so we will try again
markUnrequested(peer, piece);
// just in case
removePartialPiece(piece);
// Oops. We didn't actually download this then... :(
downloaded -= metainfo.getPieceLength(piece);
_log.warn("Got BAD piece " + piece + "/" + metainfo.getPieces() + " from " + peer + " for " + metainfo.getName());
// Reports of counter going negative?
//downloaded.addAndGet(0 - metainfo.getPieceLength(piece));
// Mark this peer as not having the piece. PeerState will update its bitfield.
for (Piece pc : wantedPieces) {
if (pc.getId() == piece) {
pc.removePeer(peer);
break;
}
}
if (_log.shouldWarn())
_log.warn("Got BAD piece " + piece + "/" + metainfo.getPieces() + " from " + peer + " for " + metainfo.getName());
return false; // No need to announce BAD piece to peers.
}
}
catch (IOException ioe)
{
snark.stopTorrent();
_log.error("Error writing storage for " + metainfo.getName(), ioe);
throw new RuntimeException("B0rked");
}
wantedPieces.remove(p);
}
wantedBytes -= metainfo.getPieceLength(p.getId());
} // synch
} catch (IOException ioe) {
String msg = "Error writing storage (piece " + piece + ") for " + metainfo.getName() + ": " + ioe;
_log.error(msg, ioe);
if (listener != null) {
listener.addMessage(msg);
listener.addMessage("Fatal storage error: Stopping torrent " + metainfo.getName());
}
// deadlock was here
snark.stopTorrent();
throw new RuntimeException(msg, ioe);
}
// just in case
removePartialPiece(piece);
boolean done = wantedBytes <= 0;
// Announce to the world we have it!
synchronized(peers)
{
Iterator it = peers.iterator();
while (it.hasNext())
{
Peer p = (Peer)it.next();
// Disconnect from other seeders when we get the last piece
List<Peer> toDisconnect = done ? new ArrayList<Peer>() : null;
for (Peer p : peers) {
if (p.isConnected())
p.have(piece);
}
}
{
if (done && p.isCompleted())
toDisconnect.add(p);
else
p.have(piece);
}
}
if (done) {
for (Peer p : toDisconnect) {
p.disconnect(true);
}
// put msg on the console if partial, since Storage won't do it
if (!completed())
snark.storageCompleted(storage);
synchronized (partialPieces) {
for (PartialPiece ppp : partialPieces) {
ppp.release();
}
partialPieces.clear();
}
}
return true;
}
// NOTE(review): merged diff rendering — the active listener.peerChange()
// call (old revision) and its commented-out replacement (new revision) are
// both present below.
/** this does nothing but logging */
public void gotChoke(Peer peer, boolean choke)
{
if (_log.shouldLog(Log.INFO))
_log.info("Got choke(" + choke + "): " + peer);
if (listener != null)
listener.peerChange(this, peer);
//if (listener != null)
// listener.peerChange(this, peer);
}
// NOTE(review): merged diff rendering — old int-counter revision
// (`uploaders < MAX_UPLOADERS`, `uploaders++`) and new AtomicInteger revision
// (`uploaders.get() < allowedUploaders()`, incrementAndGet, bandwidth-limit
// check) are interleaved. Will not compile as-is.
// PeerListener callback: peer declared (un)interest. On interest, unchoke
// the peer if we have room for another uploader.
public void gotInterest(Peer peer, boolean interest)
{
if (interest)
{
synchronized(peers)
{
if (uploaders < MAX_UPLOADERS)
if (storage == null || storage.getBitField().size() == 0) {
// XD bug #80
return;
}
if (uploaders.get() < allowedUploaders())
{
if(peer.isChoking())
if(peer.isChoking() && !overUpBWLimit())
{
uploaders++;
uploaders.incrementAndGet();
interestedUploaders.incrementAndGet();
peer.setChoking(false);
if (_log.shouldLog(Log.INFO))
_log.info("Unchoke: " + peer);
}
}
}
}
if (listener != null)
listener.peerChange(this, peer);
//if (listener != null)
// listener.peerChange(this, peer);
}
// NOTE(review): the body of this method is elided by the diff hunk marker on
// the next line — only the tail (peerCount update and the old/new listener
// notification) is visible here. Incomplete; do not edit from this view.
public void disconnected(Peer peer)
......@@ -567,19 +1324,612 @@ public class PeerCoordinator implements PeerListener
peerCount = peers.size();
}
if (listener != null)
listener.peerChange(this, peer);
//if (listener != null)
// listener.peerChange(this, peer);
}
// NOTE(review): merged diff rendering — both the old public signature with
// Iterator loop and the new private signature with for-each are present.
// New revision additionally clears the peer's per-piece requested flag.
/** Called when a peer is removed, to prevent it from being used in
* rarest-first calculations.
*/
public void removePeerFromPieces(Peer peer) {
private void removePeerFromPieces(Peer peer) {
synchronized(wantedPieces) {
for(Iterator iter = wantedPieces.iterator(); iter.hasNext(); ) {
Piece piece = (Piece)iter.next();
for (Piece piece : wantedPieces) {
piece.removePeer(peer);
piece.setRequested(peer, false);
}
}
}
/**
* Save partial pieces on peer disconnection
* and hopefully restart it later.
* Replace a partial piece in the List if the new one is bigger.
* Storage method is private so we can expand to save multiple partials
* if we wish.
*
* Also mark the piece unrequested if this peer was the only one.
*
* All PartialPieces passed in are either stored in partialPieces or
* release()d here; the list is capped at getMaxConnections() entries,
* evicting the lowest-preference one.
*
* @param peer partials, must include the zero-offset (empty) ones too.
* No dup pieces.
* len field in Requests is ignored.
* @since 0.8.2
*/
public void savePartialPieces(Peer peer, List<Request> partials)
{
if (_log.shouldLog(Log.INFO))
_log.info("Partials received from " + peer + ": " + partials);
// torrent stopped or finished: nothing to save, just free the buffers
if (halted || completed()) {
for (Request req : partials) {
PartialPiece pp = req.getPartialPiece();
pp.release();
}
return;
}
synchronized(wantedPieces) {
for (Request req : partials) {
PartialPiece pp = req.getPartialPiece();
if (pp.hasData()) {
// PartialPiece.equals() only compares piece number, which is what we want
int idx = partialPieces.indexOf(pp);
if (idx < 0) {
partialPieces.add(pp);
if (_log.shouldLog(Log.INFO))
_log.info("Saving orphaned partial piece (new) " + pp);
} else if (pp.getDownloaded() > partialPieces.get(idx).getDownloaded()) {
// replace what's there now
partialPieces.get(idx).release();
partialPieces.set(idx, pp);
if (_log.shouldLog(Log.INFO))
_log.info("Saving orphaned partial piece (bigger) " + pp);
} else {
pp.release();
if (_log.shouldLog(Log.INFO))
_log.info("Discarding partial piece (not bigger)" + pp);
}
int max = getMaxConnections();
if (partialPieces.size() > max) {
// sorts by preference, highest first
Collections.sort(partialPieces);
PartialPiece gone = partialPieces.remove(partialPieces.size() - 1);
gone.release();
if (_log.shouldLog(Log.INFO))
_log.info("Discarding orphaned partial piece (list full) " + gone);
}
} else {
// drop the empty partial piece
pp.release();
}
// synchs on wantedPieces...
markUnrequested(peer, pp.getPiece());
}
if (_log.shouldLog(Log.INFO))
_log.info("Partial list size now: " + partialPieces.size());
}
}
/**
* Return partial piece to the PeerState if it's still wanted and peer has it.
* Prefers restoring a saved partial (removed from the list and handed to the
* peer) over starting a new piece via wantPiece(); when the caller is a
* seeder and the partial is also available from enough non-seeders, the
* partial is probabilistically skipped to preserve rarest-first.
* @param havePieces pieces the peer has, the rv will be one of these
*
* @return PartialPiece or null
* @since 0.8.2
*/
public PartialPiece getPartialPiece(Peer peer, BitField havePieces) {
if (metainfo == null)
return null;
if (storage != null && storage.isChecking())
return null;
synchronized(wantedPieces) {
// sorts by preference, highest first
Collections.sort(partialPieces);
for (Iterator<PartialPiece> iter = partialPieces.iterator(); iter.hasNext(); ) {
PartialPiece pp = iter.next();
int savedPiece = pp.getPiece();
if (havePieces.get(savedPiece)) {
// this is just a double-check, it should be in there
boolean skipped = false;
outer:
for(Piece piece : wantedPieces) {
if (piece.getId() == savedPiece) {
if (peer.isCompleted() && piece.getPeerCount() > 1 &&
wantedPieces.size() > 2*END_GAME_THRESHOLD &&
partialPieces.size() < 4 &&
_random.nextInt(4) != 0) {
// Try to preserve rarest-first
// by not requesting a partial piece that at least two non-seeders also have
// from a seeder
int nonSeeds = 0;
int seeds = 0;
for (Peer pr : peers) {
if (pr.isCompleted()) {
if (++seeds >= 4)
break;
} else {
// TODO don't access state directly
PeerState state = pr.state;
if (state == null) continue;
BitField bf = state.bitfield;
if (bf == null) continue;
if (bf.get(savedPiece)) {
if (++nonSeeds > 1) {
skipped = true;
break outer;
}
}
}
}
}
// hand the saved partial to this peer
iter.remove();
piece.setRequested(peer, true);
if (_log.shouldLog(Log.INFO)) {
_log.info("Restoring orphaned partial piece " + pp + " to " + peer +
" Partial list size now: " + partialPieces.size());
}
return pp;
}
}
if (_log.shouldLog(Log.INFO)) {
if (skipped)
_log.info("Partial piece " + pp + " with multiple peers skipped for seeder");
else
_log.info("Partial piece " + pp + " NOT in wantedPieces??");
}
}
}
if (_log.shouldLog(Log.INFO) && !partialPieces.isEmpty())
_log.info("Peer " + peer + " has none of our partials " + partialPieces);
}
// ...and this section turns this into the general move-requests-around code!
// Temporary? So PeerState never calls wantPiece() directly for now...
Piece piece = wantPiece(peer, havePieces, true);
if (piece != null) {
// TODO padding
return new PartialPiece(piece, metainfo.getPieceLength(piece.getId()), _util.getTempDir());
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("We have no partial piece to return");
return null;
}
/**
 * Called when we are downloading from the peer and may need to ask for
 * a new piece. Returns true if wantPiece() or getPartialPiece() would
 * return a piece: first checks whether any saved partial piece is both
 * held by this peer and still wanted, then falls back to a
 * non-recording wantPiece() probe.
 *
 * @param peer the Peer that will be asked to provide the piece.
 * @param havePieces a BitField containing the pieces that the other
 *                   side has.
 * @return if we want any of what the peer has
 * @since 0.8.2
 */
public boolean needPiece(Peer peer, BitField havePieces) {
    synchronized(wantedPieces) {
        for (PartialPiece partial : partialPieces) {
            int id = partial.getPiece();
            if (!havePieces.get(id))
                continue;
            // this is just a double-check, it should be in there
            for (Piece wanted : wantedPieces) {
                if (wanted.getId() == id) {
                    if (_log.shouldLog(Log.INFO)) {
                        _log.info("We could restore orphaned partial piece " + partial);
                    }
                    return true;
                }
            }
        }
    }
    return wantPiece(peer, havePieces, false) != null;
}
/**
 * Remove saved state for this piece, releasing each matching
 * PartialPiece's buffer.
 * Unless we are in the end game there shouldn't be anything in there.
 * Do not call with wantedPieces lock held (deadlock)
 */
private void removePartialPiece(int piece) {
    synchronized(wantedPieces) {
        Iterator<PartialPiece> iter = partialPieces.iterator();
        while (iter.hasNext()) {
            PartialPiece partial = iter.next();
            // there should be only one but keep going to be sure
            if (partial.getPiece() != piece)
                continue;
            iter.remove();
            partial.release();
        }
    }
}
/**
 * Clear this peer's requested flag for a piece, if we still want it.
 * No-op when the piece is no longer in wantedPieces.
 */
private void markUnrequested(Peer peer, int piece)
{
    synchronized(wantedPieces) {
        for (Piece wanted : wantedPieces) {
            if (wanted.getId() != piece)
                continue;
            wanted.setRequested(peer, false);
            return;
        }
    }
}
/**
 * PeerListener callback for extension messages (basic parsing is done
 * in PeerState). Here we only react to two cases: completion of the
 * metainfo transfer while in magnet mode, and the extension handshake,
 * which triggers our PEX / DHT / comment-request follow-ups.
 *
 * @since 0.8.4
 */
public void gotExtension(Peer peer, int id, byte[] bs) {
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Got extension message " + id + " from " + peer);
    boolean awaitingMetainfo = (metainfo == null);
    if (awaitingMetainfo && id == ExtensionHandler.ID_METADATA) {
        synchronized (magnetState) {
            if (magnetState.isComplete()) {
                if (_log.shouldLog(Log.WARN))
                    _log.warn("Got completed metainfo via extension");
                metainfo = magnetState.getMetaInfo();
                listener.gotMetaInfo(this, metainfo);
            }
        }
    } else if (id == ExtensionHandler.ID_HANDSHAKE) {
        // We may not have the bitfield yet, but if we do, don't send PEX to seeds
        if (!peer.isCompleted())
            sendPeers(peer);
        sendDHT(peer);
        if (_util.utCommentsEnabled())
            sendCommentReq(peer);
    }
}
/**
 * Send a PEX message to the peer, if he supports PEX.
 * This sends everybody we have connected to since the
 * last time we sent PEX to him (skipping himself and web peers).
 * Never sent for private torrents.
 * @since 0.8.4
 */
void sendPeers(Peer peer) {
    if (metainfo != null && metainfo.isPrivate())
        return;
    Map<String, BEValue> shake = peer.getHandshakeMap();
    if (shake == null)
        return;
    BEValue m = shake.get("m");
    if (m == null)
        return;
    try {
        // peer must have advertised PEX support in its extension handshake
        if (m.getMap().get(ExtensionHandler.TYPE_PEX) == null)
            return;
        long lastSent = peer.getPexLastSent();
        List<Peer> fresh = new ArrayList<Peer>();
        for (Peer candidate : peers) {
            if (candidate.equals(peer) || candidate.isWebPeer())
                continue;
            if (candidate.getWhenConnected() > lastSent)
                fresh.add(candidate);
        }
        if (!fresh.isEmpty()) {
            ExtensionHandler.sendPEX(peer, fresh);
            peer.setPexLastSent(_util.getContext().clock().now());
        }
    } catch (InvalidBEncodingException ignored) {}
}
/**
 * Send a DHT message to the peer, if we both support DHT.
 * @since DHT
 */
void sendDHT(Peer peer) {
    DHT dht = _util.getDHT();
    if (dht == null)
        return;
    Map<String, BEValue> shake = peer.getHandshakeMap();
    if (shake == null)
        return;
    BEValue m = shake.get("m");
    if (m == null)
        return;
    try {
        // peer must have advertised DHT support in its extension handshake
        if (m.getMap().get(ExtensionHandler.TYPE_DHT) != null)
            ExtensionHandler.sendDHT(peer, dht.getPort(), dht.getRPort());
    } catch (InvalidBEncodingException ignored) {}
}
/**
 * Send a comment request message to the peer, if he supports it.
 *
 * If the peer does not support the extension (no handshake map, no "m"
 * map, or no comment entry), a seeding failsafe is recorded via
 * noteCommentReqFailsafe() so that we do not keep connecting out to a
 * swarm solely for comments nobody can provide. That failsafe logic was
 * previously triplicated inline; it is now a single private helper.
 *
 * @param peer the peer to ask for comments
 * @since 0.9.31
 */
void sendCommentReq(Peer peer) {
    Map<String, BEValue> handshake = peer.getHandshakeMap();
    if (handshake == null) {
        noteCommentReqFailsafe();
        return;
    }
    BEValue bev = handshake.get("m");
    if (bev == null) {
        noteCommentReqFailsafe();
        return;
    }
    // TODO if peer hasn't been connected very long, don't bother
    // unless forced at handshake time (see above)
    try {
        if (bev.getMap().get(ExtensionHandler.TYPE_COMMENT) != null) {
            int sz = 0;
            CommentSet comments = snark.getComments();
            if (comments != null) {
                synchronized(comments) {
                    sz = comments.size();
                }
            }
            _commentsNotRequested.set(0);
            _commentsLastRequested.set(_util.getContext().clock().now());
            // no room for more comments, don't ask
            if (sz >= CommentSet.MAX_SIZE)
                return;
            ExtensionHandler.sendCommentReq(peer, CommentSet.MAX_SIZE - sz);
        } else {
            // failsafe to prevent seed excessively connecting out to a swarm for comments
            // when nobody in the swarm supports comments
            noteCommentReqFailsafe();
        }
    } catch (InvalidBEncodingException ibee) {}
}

/**
 * Shared failsafe for sendCommentReq(): when we are seeding (nothing
 * wanted) and enough consecutive peers could not be asked for comments,
 * record "now" as the last comment-request time so we stop reconnecting
 * to the swarm just for comments.
 * @since 0.9.31
 */
private void noteCommentReqFailsafe() {
    if (wantedBytes == 0 && _commentsNotRequested.incrementAndGet() >= MAX_COMMENT_NOT_REQ)
        _commentsLastRequested.set(_util.getContext().clock().now());
}
/**
 * Sets the storage after transition out of magnet mode
 * Snark calls this after we call gotMetaInfo()
 * @since 0.8.4
 */
public void setStorage(Storage stg) {
    storage = stg;
    setWantedPieces();
    // connections are already up; push the metainfo to every connected peer
    for (Peer existing : peers) {
        existing.setMetaInfo(metainfo);
    }
}
/**
 * PeerListener callback
 * Tell the DHT to ping it, this will get back the node info
 * @param rport must be port + 1
 * @since 0.8.4
 */
public void gotPort(Peer peer, int port, int rport) {
    DHT dht = _util.getDHT();
    if (dht == null)
        return;
    boolean validPorts = port > 0 && port < 65535 && rport == port + 1;
    if (validPorts)
        dht.ping(peer.getDestination(), port);
}
/**
 * Get peers from PEX -
 * PeerListener callback.
 * Queues previously-unknown, non-self peer IDs in pexPeers;
 * TrackerClient polls that set and performs the actual connection
 * attempts on its own thread rather than running another thread here.
 * @since 0.8.4
 */
public void gotPeers(Peer peer, List<PeerID> peers) {
    if (!needOutboundPeers())
        return;
    Destination myDest = _util.getMyDestination();
    if (myDest == null)
        return;
    byte[] ourHash = myDest.calculateHash().getData();
    List<Peer> connected = peerList();
    for (PeerID candidate : peers) {
        if (peerIDInList(candidate, connected) != null)
            continue;   // already connected to this one
        if (DataHelper.eq(ourHash, candidate.getDestHash()))
            continue;   // that's us
        pexPeers.add(candidate);
    }
}
/**
 * Called when comments are requested via ut_comment.
 * Sends our comment set to the peer, but only when we have accumulated
 * more comments than we last sent him.
 *
 * @since 0.9.31
 */
public void gotCommentReq(Peer peer, int num) {
    /* TODO cache per-torrent setting, use it instead */
    if (!_util.utCommentsEnabled())
        return;
    CommentSet comments = snark.getComments();
    if (comments == null)
        return;
    int alreadySent = peer.getTotalCommentsSent();
    int total;
    synchronized(comments) {
        total = comments.size();
        // only send if we have more than last time
        if (total <= alreadySent)
            return;
        ExtensionHandler.locked_sendComments(peer, num, comments);
    }
    peer.setTotalCommentsSent(total);
}
/**
 * Called when comments are received via ut_comment;
 * forwards non-empty lists to Snark when the feature is enabled.
 *
 * @param comments non-null
 * @since 0.9.31
 */
public void gotComments(Peer peer, List<Comment> comments) {
    /* TODO cache per-torrent setting, use it instead */
    if (!_util.utCommentsEnabled())
        return;
    if (comments.isEmpty())
        return;
    snark.addComments(comments);
}
/**
 * Called by TrackerClient.
 * @return the Set itself, modifiable, not a copy, caller should clear()
 * @since 0.8.4
 */
Set<PeerID> getPEXPeers() {
    return this.pexPeers;
}
/**
 * Return number of allowed uploaders for this torrent:
 * one fewer than now if the global upload limit is exceeded,
 * otherwise one more than now, capped at MAX_UPLOADERS.
 * Check with Snark (via the listener) to see if we are over the total
 * upload limit.
 */
public int allowedUploaders()
{
    int current = uploaders.get();
    boolean overLimit = listener != null && listener.overUploadLimit(interestedUploaders.get());
    if (overLimit) {
        if (_log.shouldLog(Log.DEBUG))
            _log.debug("Over limit, uploaders was: " + current);
        return current - 1;
    }
    return (current < MAX_UPLOADERS) ? current + 1 : MAX_UPLOADERS;
}
/**
 * Uploaders whether interested or not, clamped to the current peer count.
 * Use this for per-torrent limits.
 *
 * @return current
 * @since 0.8.4
 */
public int getUploaders() {
    int count = uploaders.get();
    if (count <= 0)
        return count;
    // never report more uploaders than we have peers
    return Math.min(count, getPeers());
}
/**
 * Uploaders, interested only, clamped to the current peer count.
 * Use this to calculate the global total, so that
 * unchoked but uninterested peers don't count against the global limit.
 *
 * @return current
 * @since 0.9.28
 */
public int getInterestedUploaders() {
    int count = interestedUploaders.get();
    if (count <= 0)
        return count;
    // never report more uploaders than we have peers
    return Math.min(count, getPeers());
}
/**
 * Set the uploaders and interestedUploaders counts,
 * each clamped to the range [0, MAX_UPLOADERS].
 *
 * @since 0.9.28
 * @param upl whether interested or not
 * @param inter interested only
 */
public void setUploaders(int upl, int inter) {
    uploaders.set(Math.min(MAX_UPLOADERS, Math.max(0, upl)));
    interestedUploaders.set(Math.min(MAX_UPLOADERS, Math.max(0, inter)));
}
/**
 * Decrement the uploaders and (if set) the interestedUploaders counts,
 * resetting either counter to zero if it underflows.
 *
 * @since 0.9.28
 */
public void decrementUploaders(boolean isInterested) {
    if (uploaders.decrementAndGet() < 0)
        uploaders.set(0);
    if (isInterested && interestedUploaders.decrementAndGet() < 0)
        interestedUploaders.set(0);
}
/**
 * @return current count of interested-but-choked peers
 * @since 0.9.28
 */
public int getInterestedAndChoking() {
    return this.interestedAndChoking.get();
}
/**
 * Adjust the interested-but-choked counter by the given delta.
 * @since 0.9.28
 */
public void addInterestedAndChoking(int toAdd) {
    this.interestedAndChoking.addAndGet(toAdd);
}
/**
 * Convenience accessor for the snark util.
 * @since 0.9.2
 */
public I2PSnarkUtil getUtil() {
    return this._util;
}
/**
 * Ban a web peer for this torrent, for a while or permanently.
 * A later-expiring existing ban is never shortened.
 * @param host the hostname
 * @since 0.9.49
 */
public synchronized void banWebPeer(String host, boolean isPermanent) {
    if (_webPeerBans == null)
        _webPeerBans = new HashMap<String, Long>(4);
    long expires;
    if (isPermanent) {
        expires = Long.MAX_VALUE;
    } else {
        expires = _util.getContext().clock().now() + WEBPEER_BAN_TIME;
    }
    Long previous = _webPeerBans.put(host, Long.valueOf(expires));
    // keep the longer of the old and new bans
    if (previous != null && previous.longValue() > expires)
        _webPeerBans.put(host, previous);
}
/**
 * Is a web peer banned? Expired entries are removed as a side effect.
 * @param host the hostname
 * @since 0.9.49
 */
public synchronized boolean isWebPeerBanned(String host) {
    if (_webPeerBans == null)
        return false;
    Long expires = _webPeerBans.get(host);
    if (expires == null)
        return false;
    if (expires.longValue() > _util.getContext().clock().now())
        return true;
    // ban has expired; clean up the entry
    _webPeerBans.remove(host);
    return false;
}
}
package org.klomp.snark;
import java.util.*;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import net.i2p.crypto.SHA1Hash;
// NOTE(review): merged diff rendering — the old singleton/HashSet revision
// and the new package-private Iterable/ConcurrentHashMap-keyed-by-infohash
// revision are interleaved (two class headers, two constructors, mixed method
// bodies). Will not compile as-is; resolve to one revision before building.
/**
* Hmm, any guesses as to what this is? Used by the multitorrent functionality
......@@ -8,29 +12,29 @@ import java.util.*;
* Each PeerCoordinator is added to the set from within the Snark (and removed
* from it there too)
*/
public class PeerCoordinatorSet {
private static final PeerCoordinatorSet _instance = new PeerCoordinatorSet();
public static final PeerCoordinatorSet instance() { return _instance; }
private Set _coordinators;
class PeerCoordinatorSet implements Iterable<PeerCoordinator> {
private final Map<SHA1Hash, PeerCoordinator> _coordinators;
private PeerCoordinatorSet() {
_coordinators = new HashSet();
public PeerCoordinatorSet() {
_coordinators = new ConcurrentHashMap<SHA1Hash, PeerCoordinator>();
}
public Iterator iterator() {
synchronized (_coordinators) {
return new ArrayList(_coordinators).iterator();
}
public Iterator<PeerCoordinator> iterator() {
return _coordinators.values().iterator();
}
public void add(PeerCoordinator coordinator) {
synchronized (_coordinators) {
_coordinators.add(coordinator);
}
_coordinators.put(new SHA1Hash(coordinator.getInfoHash()), coordinator);
}
public void remove(PeerCoordinator coordinator) {
synchronized (_coordinators) {
_coordinators.remove(coordinator);
}
_coordinators.remove(new SHA1Hash(coordinator.getInfoHash()));
}
/**
* @since 0.9.2
*/
public PeerCoordinator get(byte[] infoHash) {
return _coordinators.get(new SHA1Hash(infoHash));
}
}
......@@ -21,31 +21,47 @@
package org.klomp.snark;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Map;
import org.klomp.snark.bencode.*;
import net.i2p.data.Base32;
import net.i2p.data.Base64;
import net.i2p.data.DataHelper;
import net.i2p.data.Destination;
import net.i2p.data.DataFormatException;
public class PeerID implements Comparable
import org.klomp.snark.bencode.BDecoder;
import org.klomp.snark.bencode.BEValue;
import org.klomp.snark.bencode.InvalidBEncodingException;
/**
* Store the address information about a peer.
* Prior to 0.8.1, an instantiation required a peer ID, and full Destination address.
* Starting with 0.8.1, to support compact tracker responses,
* a PeerID can be instantiated with a Destination Hash alone.
* The full destination lookup is deferred until getAddress() is called,
* and the PeerID is not required.
* Equality is now determined solely by the dest hash.
*/
public class PeerID implements Comparable<PeerID>
{
private final byte[] id;
private final Destination address;
private byte[] id;
private Destination address;
private final int port;
private byte[] destHash;
/** whether we have tried to get the dest from the hash - only do once */
private boolean triedDestLookup;
private final int hash;
private final I2PSnarkUtil util;
private String _toStringCache;
// NOTE(review): merged diff rendering — both port assignments (old literal
// 6881 and new TrackerClient.PORT) plus the new destHash/hash/util
// initialization are present. Will not compile as-is (duplicate assignment
// to final field `port`).
public PeerID(byte[] id, Destination address)
{
this.id = id;
this.address = address;
this.port = 6881;
this.port = TrackerClient.PORT;
this.destHash = address.calculateHash().getData();
hash = calculateHash();
util = null;
}
// NOTE(review): merged diff rendering — old raw-Map signature and new
// generic Map<String, BEValue> signature, old casted gets and new direct
// gets, both port assignments, and the elided javadoc (hunk marker) are
// interleaved. Will not compile as-is.
/**
......@@ -61,24 +77,41 @@ public class PeerID implements Comparable
* Creates a PeerID from a Map containing BEncoded peer id, ip and
* port.
*/
public PeerID(Map m)
public PeerID(Map<String, BEValue> m)
throws InvalidBEncodingException, UnknownHostException
{
BEValue bevalue = (BEValue)m.get("peer id");
BEValue bevalue = m.get("peer id");
if (bevalue == null)
throw new InvalidBEncodingException("peer id missing");
id = bevalue.getBytes();
bevalue = (BEValue)m.get("ip");
bevalue = m.get("ip");
if (bevalue == null)
throw new InvalidBEncodingException("ip missing");
address = I2PSnarkUtil.instance().getDestination(bevalue.getString());
address = I2PSnarkUtil.getDestinationFromBase64(bevalue.getString());
if (address == null)
throw new InvalidBEncodingException("Invalid destination [" + bevalue.getString() + "]");
port = 6881;
port = TrackerClient.PORT;
this.destHash = address.calculateHash().getData();
hash = calculateHash();
util = null;
}
/**
 * Creates a PeerID from a destHash; id and address remain null until
 * set / looked up later.
 * @param util for eventual destination lookup
 * @throws InvalidBEncodingException if the hash is not 32 bytes
 * @since 0.8.1
 */
public PeerID(byte[] dest_hash, I2PSnarkUtil util) throws InvalidBEncodingException
{
    if (dest_hash.length != 32)
        throw new InvalidBEncodingException("bad hash length");
    port = TrackerClient.PORT;
    destHash = dest_hash;
    hash = DataHelper.hashCode(dest_hash);
    this.util = util;
}
public byte[] getID()
......@@ -86,8 +119,26 @@ public class PeerID implements Comparable
return id;
}
public Destination getAddress()
/**
 * Set the peer id after the fact; used when connecting out to a peer
 * known only by its dest hash.
 * @since 0.8.1
 */
public void setID(byte[] xid)
{
    this.id = xid;
}
/**
 * Get the destination.
 * If this PeerId was instantiated with a destHash,
 * and we have not yet done so, lookup the full destination, which may
 * take up to 10 seconds. The lookup is attempted at most once, even if
 * it fails.
 * @return Dest or null if unknown
 */
public synchronized Destination getAddress()
{
    if (address != null || destHash == null || triedDestLookup)
        return address;
    // one attempt only, whether or not the lookup succeeds
    triedDestLookup = true;
    address = util.getDestination(Base32.encode(destHash) + ".b32.i2p");
    return address;
}
......@@ -96,17 +147,21 @@ public class PeerID implements Comparable
return port;
}
/** @since 0.8.1 */
public byte[] getDestHash()
{
return destHash;
}
private int calculateHash()
{
int b = 0;
for (int i = 0; i < id.length; i++)
b ^= id[i];
return (b ^ address.hashCode()) ^ port;
return DataHelper.hashCode(destHash);
}
/**
* The hash code of a PeerID is the exclusive or of all id bytes.
* The hash code of a PeerID is the hashcode of the desthash
*/
@Override
public int hashCode()
{
return hash;
......@@ -114,28 +169,24 @@ public class PeerID implements Comparable
/**
* Returns true if and only if this peerID and the given peerID have
* the same 20 bytes as ID.
* the same destination hash
*/
public boolean sameID(PeerID pid)
{
boolean equal = true;
for (int i = 0; equal && i < id.length; i++)
equal = id[i] == pid.id[i];
return equal;
return DataHelper.eq(destHash, pid.getDestHash());
}
/**
* Two PeerIDs are equal when they have the same id, address and port.
* Two PeerIDs are equal when they have the same dest hash
*/
@Override
public boolean equals(Object o)
{
if (o instanceof PeerID)
{
PeerID pid = (PeerID)o;
return port == pid.port
&& address.equals(pid.address)
&& sameID(pid);
return sameID(pid);
}
else
return false;
......@@ -143,11 +194,11 @@ public class PeerID implements Comparable
/**
* Compares port, address and id.
* @deprecated unused? and will NPE now that address can be null?
*/
public int compareTo(Object o)
@Deprecated
public int compareTo(PeerID pid)
{
PeerID pid = (PeerID)o;
int result = port - pid.port;
if (result != 0)
return result;
......@@ -167,10 +218,21 @@ public class PeerID implements Comparable
}
/**
* Returns the String "id@address" where id is the base64 encoded id.
* Returns the String "id@address" where id is the first 4 chars of the base64 encoded id
* and address is the first 6 chars of the base64 dest (was the base64 hash of the dest) which
* should match what the bytemonsoon tracker reports on its web pages.
*/
@Override
public String toString()
{
if (_toStringCache != null)
return _toStringCache;
if (id != null && DataHelper.eq(id, 0, WebPeer.IDBytes, 0, WebPeer.IDBytes.length)) {
_toStringCache = "WebSeed@" + Base32.encode(destHash) + ".b32.i2p";
return _toStringCache;
}
if (id == null || address == null)
return "unkn@" + Base64.encode(destHash).substring(0, 6);
int nonZero = 0;
for (int i = 0; i < id.length; i++) {
if (id[i] != 0) {
......@@ -178,7 +240,8 @@ public class PeerID implements Comparable
break;
}
}
return Base64.encode(id, nonZero, id.length-nonZero).substring(0,4) + "@" + address.calculateHash().toBase64().substring(0,6);
_toStringCache = Base64.encode(id, nonZero, id.length-nonZero).substring(0,4) + "@" + address.toBase64().substring(0,6);
return _toStringCache;
}
/**
......@@ -188,7 +251,7 @@ public class PeerID implements Comparable
{
boolean leading_zeros = true;
StringBuffer sb = new StringBuffer(bs.length*2);
StringBuilder sb = new StringBuilder(bs.length*2);
for (int i = 0; i < bs.length; i++)
{
int c = bs[i] & 0xFF;
......
......@@ -20,10 +20,16 @@
package org.klomp.snark;
import java.util.List;
import net.i2p.data.ByteArray;
import org.klomp.snark.comments.Comment;
/**
* Listener for Peer events.
*/
public interface PeerListener
interface PeerListener
{
/**
* Called when the connection to the peer has started and the
......@@ -93,12 +99,11 @@ public interface PeerListener
* will be closed.
*
* @param peer the Peer that got the piece.
* @param piece the piece number received.
* @param bs the byte array containing the piece.
* @param piece the piece received.
*
* @return true when the bytes represent the piece, false otherwise.
*/
boolean gotPiece(Peer peer, int piece, byte[] bs);
boolean gotPiece(Peer peer, PartialPiece piece);
/**
* Called when the peer wants (part of) a piece from us. Only called
......@@ -107,39 +112,93 @@ public interface PeerListener
*
* @param peer the Peer that wants the piece.
* @param piece the piece number requested.
* @param off byte offset into the piece.
* @param len length of the chunk requested.
*
* @return a byte array containing the piece or null when the piece
* is not available (which is a protocol error).
*/
byte[] gotRequest(Peer peer, int piece);
ByteArray gotRequest(Peer peer, int piece, int off, int len);
/**
* Called when a (partial) piece has been downloaded from the peer.
* Called when we are downloading from the peer and may need to ask for
* a new piece. Returns true if wantPiece() or getPartialPiece() would return a piece.
*
* @param peer the Peer from which size bytes where downloaded.
* @param size the number of bytes that where downloaded.
* @param peer the Peer that will be asked to provide the piece.
* @param bitfield a BitField containing the pieces that the other
* side has.
*
* @return if we want any of what the peer has
* @since 0.8.2
*/
void downloaded(Peer peer, int size);
boolean needPiece(Peer peer, BitField bitfield);
/**
* Called when a (partial) piece has been uploaded to the peer.
* Called when the peer has disconnected and the peer task may have a partially
* downloaded piece that the PeerCoordinator can save
*
* @param peer the Peer to which size bytes where uploaded.
* @param size the number of bytes that where uploaded.
* @param peer the peer
* @since 0.8.2
*/
void uploaded(Peer peer, int size);
void savePartialPieces(Peer peer, List<Request> pcs);
/**
* Called when we are downloading from the peer and need to ask for
* a new piece. Might be called multiple times before
* <code>gotPiece()</code> is called.
* Called when a peer has connected and there may be a partially
* downloaded piece that the coordinatorator can give the peer task
*
* @param peer the Peer that will be asked to provide the piece.
* @param bitfield a BitField containing the pieces that the other
* side has.
* @param havePieces the have-pieces bitmask for the peer
*
* @return request (contains the partial data and valid length)
* @since 0.8.2
*/
PartialPiece getPartialPiece(Peer peer, BitField havePieces);
/**
* Called when an extension message is received.
*
* @param peer the Peer that got the message.
* @param id the message ID
* @param bs the message payload
* @since 0.8.4
*/
void gotExtension(Peer peer, int id, byte[] bs);
/**
* Called when a DHT port message is received.
*
* @param peer the Peer that got the message.
* @param port the query port
* @param rport the response port
* @since 0.8.4
*/
void gotPort(Peer peer, int port, int rport);
/**
* Called when peers are received via PEX
*
* @param peer the Peer that got the message.
* @param pIDList the peer IDs (dest hashes)
* @since 0.8.4
*/
void gotPeers(Peer peer, List<PeerID> pIDList);
/**
* Convenience
* @since 0.9.2
*/
public I2PSnarkUtil getUtil();
/**
* Called when comments are requested via ut_comment
*
* @since 0.9.31
*/
public void gotCommentReq(Peer peer, int num);
/**
* Called when comments are received via ut_comment
*
* @return one of the pieces from the bitfield that we want or -1 if
* we are no longer interested in the peer.
* @since 0.9.31
*/
int wantPiece(Peer peer, BitField bitfield);
public void gotComments(Peer peer, List<Comment> comments);
}
......@@ -20,21 +20,26 @@
package org.klomp.snark;
import java.util.*;
import java.util.Iterator;
import net.i2p.data.DataHelper;
/**
* TimerTask that monitors the peers and total up/download speeds.
* Works together with the main Snark class to report periodical statistics.
*
* @deprecated unused, for command line client only, commented out in Snark.java
*/
class PeerMonitorTask extends TimerTask
@Deprecated
class PeerMonitorTask implements Runnable
{
final static long MONITOR_PERIOD = 10 * 1000; // Ten seconds.
private final long KILOPERSECOND = 1024 * (MONITOR_PERIOD / 1000);
private static final long KILOPERSECOND = 1024 * (MONITOR_PERIOD / 1000);
private final PeerCoordinator coordinator;
private long lastDownloaded = 0;
private long lastUploaded = 0;
//private long lastDownloaded = 0;
//private long lastUploaded = 0;
PeerMonitorTask(PeerCoordinator coordinator)
{
......@@ -43,6 +48,7 @@ class PeerMonitorTask extends TimerTask
public void run()
{
/*****
// Get some statistics
int peers = 0;
int uploaders = 0;
......@@ -82,21 +88,12 @@ class PeerMonitorTask extends TimerTask
// Print some statistics
long downloaded = coordinator.getDownloaded();
String totalDown;
if (downloaded >= 10 * 1024 * 1024)
totalDown = (downloaded / (1024 * 1024)) + "MB";
else
totalDown = (downloaded / 1024 )+ "KB";
String totalDown = DataHelper.formatSize(downloaded) + "B";
long uploaded = coordinator.getUploaded();
String totalUp;
if (uploaded >= 10 * 1024 * 1024)
totalUp = (uploaded / (1024 * 1024)) + "MB";
else
totalUp = (uploaded / 1024) + "KB";
String totalUp = DataHelper.formatSize(uploaded) + "B";
int needP = coordinator.storage.needed();
long needMB
= needP * coordinator.metainfo.getPieceLength(0) / (1024 * 1024);
long needMB = needP * coordinator.metainfo.getPieceLength(0) / (1024 * 1024);
int totalP = coordinator.metainfo.getPieces();
long totalMB = coordinator.metainfo.getTotalLength() / (1024 * 1024);
......@@ -124,5 +121,6 @@ class PeerMonitorTask extends TimerTask
lastDownloaded = downloaded;
lastUploaded = uploaded;
****/
}
}
......@@ -21,34 +21,43 @@
package org.klomp.snark;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.HashSet;
import net.i2p.I2PAppContext;
import net.i2p.data.ByteArray;
import net.i2p.util.Log;
class PeerState
import org.klomp.snark.bencode.BEValue;
import org.klomp.snark.bencode.InvalidBEncodingException;
class PeerState implements DataLoader
{
private Log _log = new Log(PeerState.class);
final Peer peer;
private final Log _log = I2PAppContext.getGlobalContext().logManager().getLog(PeerState.class);
private final Peer peer;
/** Fixme, used by Peer.disconnect() to get to the coordinator */
final PeerListener listener;
final MetaInfo metainfo;
private final BandwidthListener bwListener;
/** Null before we have it. locking: this */
private MetaInfo metainfo;
/** Null unless needed. Contains -1 for all. locking: this */
private List<Integer> havesBeforeMetaInfo;
// Interesting and choking describes whether we are interested in or
// are choking the other side.
boolean interesting = false;
boolean choking = true;
volatile boolean interesting;
volatile boolean choking = true;
// Interested and choked describes whether the other side is
// interested in us or choked us.
boolean interested = false;
boolean choked = true;
// Package local for use by Peer.
long downloaded;
long uploaded;
volatile boolean interested;
volatile boolean choked = true;
/** the pieces the peer has. locking: this */
BitField bitfield;
// Package local for use by Peer.
......@@ -56,26 +65,41 @@ class PeerState
final PeerConnectionOut out;
// Outstanding request
private final List outstandingRequests = new ArrayList();
private Request lastRequest = null;
// If we have te resend outstanding requests (true after we got choked).
private boolean resend = false;
private final List<Request> outstandingRequests = new ArrayList<Request>();
/** the tail (NOT the head) of the request queue */
private Request lastRequest;
private int currentMaxPipeline;
private final static int MAX_PIPELINE = 1;
private final static int PARTSIZE = 64*1024; // default was 16K, i2p-bt uses 64KB
// FIXME if piece size < PARTSIZE, pipeline could be bigger
/** @since 0.9.47 */
public static final int MIN_PIPELINE = 5; // this is for outbound requests
/** @since public since 0.9.47 */
public static final int MAX_PIPELINE = 8; // this is for outbound requests
public final static int PARTSIZE = 16*1024; // outbound request
private final static int MAX_PIPELINE_BYTES = (MAX_PIPELINE + 2) * PARTSIZE; // this is for inbound requests
private final static int MAX_PARTSIZE = 64*1024; // Don't let anybody request more than this
private static final Integer PIECE_ALL = Integer.valueOf(-1);
PeerState(Peer peer, PeerListener listener, MetaInfo metainfo,
/**
* @param metainfo null if in magnet mode
*/
PeerState(Peer peer, PeerListener listener, BandwidthListener bwl, MetaInfo metainfo,
PeerConnectionIn in, PeerConnectionOut out)
{
this.peer = peer;
this.listener = listener;
bwListener = bwl;
this.metainfo = metainfo;
this.in = in;
this.out = out;
}
/**
* @since 0.9.62
*/
BandwidthListener getBandwidthListener() { return bwListener; }
// NOTE Methods that inspect or change the state synchronize (on this).
void keepAliveMessage()
......@@ -90,14 +114,27 @@ class PeerState
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " rcv " + (choke ? "" : "un") + "choked");
boolean resend = choked && !choke;
choked = choke;
if (choked)
resend = true;
listener.gotChoke(peer, choke);
if (!choked && interesting)
request();
if (interesting && !choked)
request(resend);
if (choked) {
out.cancelRequestMessages();
// old Roberts thrash us here, choke+unchoke right together
// The only problem with returning the partials to the coordinator
// is that chunks above a missing request are lost.
// Future enhancements to PartialPiece could keep track of the holes.
List<Request> pcs = returnPartialPieces();
if (!pcs.isEmpty()) {
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " got choked, returning partial pieces to the PeerCoordinator: " + pcs);
listener.savePartialPieces(this.peer, pcs);
}
}
}
void interestedMessage(boolean interest)
......@@ -114,45 +151,140 @@ class PeerState
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " rcv have(" + piece + ")");
// Sanity check
if (piece < 0 || piece >= metainfo.getPieces())
{
// XXX disconnect?
if (_log.shouldLog(Log.WARN))
if (piece < 0) {
if (_log.shouldWarn())
_log.warn("Got strange 'have: " + piece + "' message from " + peer);
return;
}
}
synchronized(this) {
if (metainfo == null) {
if (_log.shouldWarn())
_log.warn("Got HAVE " + piece + " before metainfo from " + peer);
if (bitfield != null) {
if (piece < bitfield.size())
bitfield.set(piece);
} else {
// note reception for later
if (havesBeforeMetaInfo == null) {
havesBeforeMetaInfo = new ArrayList<Integer>(8);
} else if (havesBeforeMetaInfo.size() > 1000) {
// don't blow up
if (_log.shouldWarn())
_log.warn("Got too many haves before metainfo from " + peer);
return;
}
havesBeforeMetaInfo.add(Integer.valueOf(piece));
}
return;
}
// Sanity check
if (piece >= metainfo.getPieces()) {
// XXX disconnect?
if (_log.shouldLog(Log.WARN))
_log.warn("Got strange 'have: " + piece + "' message from " + peer);
return;
}
synchronized(this)
{
// Can happen if the other side never send a bitfield message.
if (bitfield == null)
bitfield = new BitField(metainfo.getPieces());
bitfield = new BitField(metainfo.getPieces());
bitfield.set(piece);
}
}
if (listener.gotHave(peer, piece))
setInteresting(true);
}
void bitfieldMessage(byte[] bitmap)
{
synchronized(this)
{
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " rcv bitfield");
void bitfieldMessage(byte[] bitmap) {
bitfieldMessage(bitmap, false);
}
/**
* @param bitmap null to use the isAll param
* @param isAll only if bitmap == null: true for have_all, false for have_none
* @since 0.9.21
*/
private void bitfieldMessage(byte[] bitmap, boolean isAll) {
if (_log.shouldLog(Log.DEBUG)) {
if (bitmap != null)
_log.debug(peer + " rcv bitfield bytes: " + bitmap.length);
else if (isAll)
_log.debug(peer + " rcv bitfield HAVE_ALL");
else
_log.debug(peer + " rcv bitfield HAVE_NONE");
}
synchronized(this) {
if (bitfield != null)
{
// XXX - Be liberal in what you except?
// XXX - Be liberal in what you accept?
if (_log.shouldLog(Log.WARN))
_log.warn("Got unexpected bitfield message from " + peer);
return;
}
// XXX - Check for weird bitfield and disconnect?
bitfield = new BitField(bitmap, metainfo.getPieces());
}
setInteresting(listener.gotBitField(peer, bitfield));
// Will have to regenerate the bitfield after we know exactly
// how many pieces there are, as we don't know how many spare bits there are.
// This happens in setMetaInfo() below.
if (metainfo == null) {
if (bitmap != null) {
bitfield = new BitField(bitmap, bitmap.length * 8);
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("have_x w/o metainfo: " + isAll);
if (isAll) {
// note reception for later
if (havesBeforeMetaInfo == null)
havesBeforeMetaInfo = new ArrayList<Integer>(1);
else
havesBeforeMetaInfo.clear();
havesBeforeMetaInfo.add(PIECE_ALL);
} // else HAVE_NONE, ignore
}
return;
} else {
if (bitmap != null) {
bitfield = new BitField(bitmap, metainfo.getPieces());
} else {
bitfield = new BitField(metainfo.getPieces());
if (isAll)
bitfield.setAll();
}
}
} // synch
boolean interest = listener.gotBitField(peer, bitfield);
if (bitfield.complete() && !interest) {
// They are seeding and we are seeding,
// why did they contact us? (robert)
// Dump them quick before we send our whole bitmap
// If we both support comments, allow it
if (listener.getUtil().utCommentsEnabled()) {
Map<String, BEValue> handshake = peer.getHandshakeMap();
if (handshake != null) {
BEValue bev = handshake.get("m");
if (bev != null) {
try {
if (bev.getMap().get(ExtensionHandler.TYPE_COMMENT) != null) {
if (_log.shouldDebug())
_log.debug("Allowing seed that connects to seeds for comments: " + peer);
setInteresting(false);
return;
}
} catch (InvalidBEncodingException ibee) {}
}
}
}
if (_log.shouldLog(Log.WARN))
_log.warn("Disconnecting seed that connects to seeds: " + peer);
peer.disconnect(true);
} else {
setInteresting(interest);
}
}
void requestMessage(int piece, int begin, int length)
......@@ -160,20 +292,29 @@ class PeerState
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " rcv request("
+ piece + ", " + begin + ", " + length + ") ");
if (choking)
{
if (_log.shouldLog(Log.INFO))
_log.info("Request received, but choking " + peer);
if (metainfo == null)
return;
}
if (choking) {
if (peer.supportsFast()) {
if (_log.shouldInfo())
_log.info("Request received, sending reject to choked " + peer);
out.sendReject(piece, begin, length);
} else {
if (_log.shouldInfo())
_log.info("Request received, but choking " + peer);
}
return;
}
// Sanity check
// There is no check here that we actually have the piece;
// this will be caught in loadData() below
if (piece < 0
|| piece >= metainfo.getPieces()
|| begin < 0
|| begin > metainfo.getPieceLength(piece)
|| length <= 0
|| length > 4*PARTSIZE)
|| length > MAX_PARTSIZE)
{
// XXX - Protocol error -> disconnect?
if (_log.shouldLog(Log.WARN))
......@@ -181,20 +322,56 @@ class PeerState
+ ", " + begin
+ ", " + length
+ "' message from " + peer);
if (peer.supportsFast())
out.sendReject(piece, begin, length);
return;
}
byte[] pieceBytes = listener.gotRequest(peer, piece);
// Limit total pipelined requests to MAX_PIPELINE bytes
// to conserve memory and prevent DOS
// Todo: limit number of requests also? (robert 64 x 4KB)
if (out.queuedBytes() + length > MAX_PIPELINE_BYTES)
{
if (peer.supportsFast()) {
if (_log.shouldWarn())
_log.warn("Rejecting request over pipeline limit from " + peer);
out.sendReject(piece, begin, length);
} else {
if (_log.shouldWarn())
_log.warn("Discarding request over pipeline limit from " + peer);
}
return;
}
if (_log.shouldLog(Log.DEBUG))
_log.debug("Queueing (" + piece + ", " + begin + ", "
+ length + ")" + " to " + peer);
// don't load the data into mem now, let PeerConnectionOut do it
out.sendPiece(piece, begin, length, this);
}
/**
* This is the callback that PeerConnectionOut calls
*
* @return bytes or null for errors such as not having the piece yet
* @throws RuntimeException on IOE getting the data
* @since 0.8.2
*/
public ByteArray loadData(int piece, int begin, int length) {
ByteArray pieceBytes = listener.gotRequest(peer, piece, begin, length);
if (pieceBytes == null)
{
// XXX - Protocol error-> diconnect?
if (_log.shouldLog(Log.WARN))
_log.warn("Got request for unknown piece: " + piece);
return;
if (peer.supportsFast())
out.sendReject(piece, begin, length);
return null;
}
// More sanity checks
if (begin >= pieceBytes.length || begin + length > pieceBytes.length)
if (length != pieceBytes.getData().length)
{
// XXX - Protocol error-> disconnect?
if (_log.shouldLog(Log.WARN))
......@@ -202,13 +379,15 @@ class PeerState
+ ", " + begin
+ ", " + length
+ "' message from " + peer);
return;
if (peer.supportsFast())
out.sendReject(piece, begin, length);
return null;
}
if (_log.shouldLog(Log.INFO))
_log.info("Sending (" + piece + ", " + begin + ", "
if (_log.shouldLog(Log.DEBUG))
_log.debug("Sending (" + piece + ", " + begin + ", "
+ length + ")" + " to " + peer);
out.sendPiece(piece, begin, length, pieceBytes);
return pieceBytes;
}
/**
......@@ -217,86 +396,134 @@ class PeerState
*/
void uploaded(int size)
{
uploaded += size;
listener.uploaded(peer, size);
peer.uploaded(size);
}
// This is used to flag that we have to back up from the firstOutstandingRequest
// when calculating how far we've gotten
private Request pendingRequest;
/**
* Called when a partial piece request has been handled by
* Called when a full chunk (i.e. a piece message) has been received by
* PeerConnectionIn.
*
* This may block quite a while if it is the last chunk for a piece,
* as it calls the listener, who stores the piece and then calls
* havePiece for every peer on the torrent (including us).
*
*/
void pieceMessage(Request req)
{
int size = req.len;
downloaded += size;
listener.downloaded(peer, size);
// Now reported byte-by-byte in PartialPiece
//peer.downloaded(size);
if (_log.shouldLog(Log.DEBUG))
_log.debug("got end of Chunk("
+ req.getPiece() + "," + req.off + "," + req.len + ") from "
+ peer);
// Last chunk needed for this piece?
if (getFirstOutstandingRequest(req.piece) == -1)
PartialPiece pp = req.getPartialPiece();
boolean complete = pp.isComplete();
if (complete)
{
if (listener.gotPiece(peer, req.piece, req.bs))
// warning - may block here for a while
if (listener.gotPiece(peer, pp))
{
if (_log.shouldLog(Log.DEBUG))
_log.debug("Got " + req.piece + ": " + peer);
_log.debug("Got " + req.getPiece() + ": " + peer);
}
else
{
if (_log.shouldLog(Log.WARN))
_log.warn("Got BAD " + req.piece + " from " + peer);
// XXX ARGH What now !?!
downloaded = 0;
_log.warn("Got BAD " + req.getPiece() + " from " + peer);
synchronized(this) {
// so we don't ask again
if (bitfield != null)
bitfield.clear(req.getPiece());
}
}
}
// ok done with this one
synchronized(this) {
pendingRequest = null;
}
// getOutstandingRequest() was called by PeerConnectionIn at the start of the chunk;
// if the bandwidth limiter throttled us to zero requests then, try again now
if (outstandingRequests.isEmpty()) {
addRequest();
if (!complete) {
synchronized(this) {
if (outstandingRequests.isEmpty()) {
// we MUST return the partial piece to PeerCoordinator,
// or else we will lose it and leak the data
if (_log.shouldWarn())
_log.warn("Throttled, returned to coord. w/ data " + req);
List<Request> pcs = Collections.singletonList(req);
listener.savePartialPieces(this.peer, pcs);
lastRequest = null;
}
}
}
}
}
/**
* TODO this is how we tell we got all the chunks in pieceMessage() above.
*
*
* @return index in outstandingRequests or -1
*/
synchronized private int getFirstOutstandingRequest(int piece)
{
{
for (int i = 0; i < outstandingRequests.size(); i++)
if (((Request)outstandingRequests.get(i)).piece == piece)
if (outstandingRequests.get(i).getPiece() == piece)
return i;
return -1;
}
/**
* Called when a piece message is being processed by the incoming
* connection. Returns null when there was no such request. It also
* connection. That is, when the header of the piece message was received.
* Returns null when there was no such request. It also
* requeues/sends requests when it thinks that they must have been
* lost.
*/
Request getOutstandingRequest(int piece, int begin, int length)
{
if (_log.shouldLog(Log.DEBUG))
_log.debug("getChunk("
+ piece + "," + begin + "," + length + ") "
_log.debug("got start of Chunk("
+ piece + "," + begin + "," + length + ") from "
+ peer);
int r = getFirstOutstandingRequest(piece);
// Unrequested piece number?
if (r == -1)
{
if (_log.shouldLog(Log.INFO))
_log.info("Unrequested 'piece: " + piece + ", "
+ begin + ", " + length + "' received from "
+ peer);
downloaded = 0; // XXX - punishment?
return null;
}
// Lookup the correct piece chunk request from the list.
Request req;
synchronized(this)
{
req = (Request)outstandingRequests.get(r);
while (req.piece == piece && req.off != begin
int r = getFirstOutstandingRequest(piece);
// Unrequested piece number?
if (r == -1) {
if (_log.shouldLog(Log.INFO))
_log.info("Unrequested 'piece: " + piece + ", "
+ begin + ", " + length + "' received from "
+ peer);
return null;
}
req = outstandingRequests.get(r);
while (req.getPiece() == piece && req.off != begin
&& r < outstandingRequests.size() - 1)
{
r++;
req = (Request)outstandingRequests.get(r);
req = outstandingRequests.get(r);
}
// Something wrong?
if (req.piece != piece || req.off != begin || req.len != length)
if (req.getPiece() != piece || req.off != begin || req.len != length)
{
if (_log.shouldLog(Log.INFO))
_log.info("Unrequested or unneeded 'piece: "
......@@ -304,9 +531,11 @@ class PeerState
+ begin + ", "
+ length + "' received from "
+ peer);
downloaded = 0; // XXX - punishment?
return null;
}
// note that this request is being read
pendingRequest = req;
// Report missing requests.
if (r != 0)
......@@ -316,16 +545,10 @@ class PeerState
+ ", wanted for peer: " + peer);
for (int i = 0; i < r; i++)
{
Request dropReq = (Request)outstandingRequests.remove(0);
Request dropReq = outstandingRequests.remove(0);
outstandingRequests.add(dropReq);
// We used to rerequest the missing chunks but that mostly
// just confuses the other side. So now we just keep
// waiting for them. They will be rerequested when we get
// choked/unchoked again.
/*
if (!choked)
if (!choked)
out.sendRequest(dropReq);
*/
if (_log.shouldLog(Log.WARN))
_log.warn("dropped " + dropReq + " with peer " + peer);
}
......@@ -340,6 +563,62 @@ class PeerState
}
/**
* @return lowest offset of any request for the piece
* @since 0.8.2
*/
synchronized private Request getLowestOutstandingRequest(int piece) {
Request rv = null;
int lowest = Integer.MAX_VALUE;
for (Request r : outstandingRequests) {
if (r.getPiece() == piece && r.off < lowest) {
lowest = r.off;
rv = r;
}
}
if (pendingRequest != null &&
pendingRequest.getPiece() == piece && pendingRequest.off < lowest)
rv = pendingRequest;
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " lowest for " + piece + " is " + rv + " out of " + pendingRequest + " and " + outstandingRequests);
return rv;
}
/**
* Get partial pieces, give them back to PeerCoordinator.
* Clears the request queue.
* @return List of PartialPieces, even those with an offset == 0, or empty list
* @since 0.8.2
*/
synchronized List<Request> returnPartialPieces()
{
Set<Integer> pcs = getRequestedPieces();
List<Request> rv = new ArrayList<Request>(pcs.size());
for (Integer p : pcs) {
Request req = getLowestOutstandingRequest(p.intValue());
if (req != null)
rv.add(req);
}
outstandingRequests.clear();
pendingRequest = null;
lastRequest = null;
return rv;
}
/**
* @return all pieces we are currently requesting, or empty Set
*/
synchronized private Set<Integer> getRequestedPieces() {
Set<Integer> rv = new HashSet<Integer>(outstandingRequests.size() + 1);
for (Request req : outstandingRequests) {
rv.add(Integer.valueOf(req.getPiece()));
if (pendingRequest != null)
rv.add(Integer.valueOf(pendingRequest.getPiece()));
}
return rv;
}
void cancelMessage(int piece, int begin, int length)
{
if (_log.shouldLog(Log.DEBUG))
......@@ -348,6 +627,145 @@ class PeerState
out.cancelRequest(piece, begin, length);
}
/** @since 0.8.2 */
void extensionMessage(int id, byte[] bs)
{
if (metainfo != null && metainfo.isPrivate() &&
(id == ExtensionHandler.ID_METADATA || id == ExtensionHandler.ID_PEX)) {
// shouldn't get this since we didn't advertise it but they could send it anyway
if (_log.shouldLog(Log.WARN))
_log.warn("Private torrent, ignoring ext msg " + id);
return;
}
ExtensionHandler.handleMessage(peer, listener, id, bs);
// Peer coord will get metadata from MagnetState,
// verify, and then call gotMetaInfo()
listener.gotExtension(peer, id, bs);
}
/**
* Switch from magnet mode to normal mode.
* If we already have the metainfo, this does nothing.
* @param meta non-null
* @since 0.8.4
*/
public synchronized void setMetaInfo(MetaInfo meta) {
if (metainfo != null)
return;
if (bitfield != null) {
if (bitfield.size() != meta.getPieces())
// fix bitfield, it was too big by 1-7 bits
bitfield = new BitField(bitfield.getFieldBytes(), meta.getPieces());
// else no extra
} else if (havesBeforeMetaInfo != null) {
// initialize it now
bitfield = new BitField(meta.getPieces());
} else {
// it will be initialized later
//bitfield = new BitField(meta.getPieces());
}
metainfo = meta;
if (bitfield != null) {
if (havesBeforeMetaInfo != null) {
// set all 'haves' we got before the metainfo in the bitfield
for (Integer i : havesBeforeMetaInfo) {
if (i.equals(PIECE_ALL)) {
bitfield.setAll();
if (_log.shouldLog(Log.WARN))
_log.warn("set have_all after rcv metainfo");
break;
}
int piece = i.intValue();
if (piece >= 0 && piece < meta.getPieces())
bitfield.set(piece);
if (_log.shouldLog(Log.WARN))
_log.warn("set have " + piece + " after rcv metainfo");
}
havesBeforeMetaInfo = null;
}
if (bitfield.count() > 0)
setInteresting(true);
}
}
/**
* Unused
* @since 0.8.4
*/
void portMessage(int port)
{
// for compatibility with old DHT PORT message
listener.gotPort(peer, port, port + 1);
}
/////////// fast message handlers /////////
/**
* BEP 6
* Treated as "have" for now
* @since 0.9.21
*/
void suggestMessage(int piece) {
if (_log.shouldInfo())
_log.info("Handling suggest as have(" + piece + ") from " + peer);
haveMessage(piece);
}
/**
* BEP 6
* @param isAll true for have_all, false for have_none
* @since 0.9.21
*/
void haveMessage(boolean isAll) {
bitfieldMessage(null, isAll);
}
/**
* BEP 6
*
* @since 0.9.21
*/
void rejectMessage(int piece, int begin, int length) {
if (_log.shouldInfo())
_log.info("Got reject(" + piece + ',' + begin + ',' + length + ") from " + peer);
out.cancelRequest(piece, begin, length);
synchronized(this) {
Request deletedRequest = null;
// for this piece only
boolean haveMoreRequests = false;
for (Iterator<Request> iter = outstandingRequests.iterator(); iter.hasNext(); ) {
Request req = iter.next();
if (req.getPiece() == piece) {
if (req.off == begin && req.len == length) {
iter.remove();
deletedRequest = req;
} else {
haveMoreRequests = true;
}
}
}
if (deletedRequest != null && !haveMoreRequests) {
List<Request> pcs = Collections.singletonList(deletedRequest);
listener.savePartialPieces(this.peer, pcs);
if (_log.shouldWarn())
_log.warn("Returned to coord. w/ data " + deletedRequest.getPartialPiece().getDownloaded() + " due to reject(" + piece + ',' + begin + ',' + length + ") from " + peer);
}
if (lastRequest != null && lastRequest.getPiece() == piece &&
lastRequest.off == begin && lastRequest.len == length)
lastRequest = null;
}
}
/**
* BEP 6
* Ignored for now
* @since 0.9.21
*/
void allowedFastMessage(int piece) {
if (_log.shouldInfo())
_log.info("Ignoring allowed_fast(" + piece + ") from " + peer);
}
void unknownMessage(int type, byte[] bs)
{
if (_log.shouldLog(Log.WARN))
......@@ -355,56 +773,93 @@ class PeerState
+ " length: " + bs.length);
}
/////////// end message handlers /////////
/**
 * We now have this piece.
 * Tell the peer and cancel any requests for the piece,
 * then request something else if necessary.
 *
 * NOTE(review): the inline cancellation loop in the synchronized block
 * below appears to duplicate the work of cancelPiece(piece), which is
 * called immediately after it -- this looks like a merge artifact;
 * confirm whether the inline loop can be removed.
 */
void havePiece(int piece)
{
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Tell " + peer + " havePiece(" + piece + ")");

    synchronized(this)
    {
        // Tell the other side that we are no longer interested in any of
        // the outstanding requests for this piece.
        if (lastRequest != null && lastRequest.piece == piece)
            lastRequest = null;

        // Iterator so matching requests can be removed while walking the list.
        Iterator it = outstandingRequests.iterator();
        while (it.hasNext())
        {
            Request req = (Request)it.next();
            if (req.piece == piece)
            {
                it.remove();
                // Send cancel even when we are choked to make sure that it is
                // really never ever send.
                out.sendCancel(req);
            }
        }
    }
    // see NOTE(review) above -- repeats the cancellation just performed inline
    cancelPiece(piece);

    // Tell the other side that we really have this piece.
    out.sendHave(piece);

    // Request something else if necessary.
    addRequest();

    /**** taken care of in addRequest()
    synchronized(this)
    {
        // Is the peer still interesting?
        if (lastRequest == null)
            setInteresting(false);
    }
    ****/
}
// Starts or resumes requesting pieces.
private void request()
/**
 * Tell the other side that we are no longer interested in any of
 * the outstanding requests (if any) for this piece, and release
 * the partial-piece buffers of the cancelled requests.
 *
 * @param piece the piece number to drop
 * @since 0.8.1
 */
synchronized void cancelPiece(int piece) {
    // Forget the tail request if it belongs to this piece.
    if (lastRequest != null && lastRequest.getPiece() == piece)
        lastRequest = null;

    // Explicit iterator so matching entries can be removed in place.
    for (Iterator<Request> iter = outstandingRequests.iterator(); iter.hasNext(); ) {
        Request pending = iter.next();
        if (pending.getPiece() != piece)
            continue;
        iter.remove();
        // Send cancel even when we are choked to make sure that it is
        // really never ever send.
        out.sendCancel(pending);
        pending.getPartialPiece().release();
    }
}
/**
 * Are we currently requesting the piece, either as an in-flight
 * (pending) request or anywhere in the outstanding-request queue?
 *
 * @param piece the piece number to look for
 * @deprecated deadlocks
 * @since 0.8.1
 */
@Deprecated
synchronized boolean isRequesting(int piece) {
    // Scan the queued requests first, then check the one being received.
    for (Request outstanding : outstandingRequests) {
        if (outstanding.getPiece() == piece)
            return true;
    }
    return pendingRequest != null && pendingRequest.getPiece() == piece;
}
/**
* Starts or resumes requesting pieces.
* @param resend should we resend outstanding requests?
*/
private void request(boolean resend)
{
// Are there outstanding requests that have to be resend?
if (resend)
{
synchronized (this) {
out.sendRequests(outstandingRequests);
if (!outstandingRequests.isEmpty()) {
out.sendRequests(outstandingRequests);
if (_log.shouldLog(Log.DEBUG))
_log.debug("Resending requests to " + peer + outstandingRequests);
}
}
resend = false;
}
// Add/Send some more requests if necessary.
......@@ -413,114 +868,219 @@ class PeerState
/**
* Adds a new request to the outstanding requests list.
* Then send interested if we weren't.
* Then send new requests if not choked.
* If nothing to request, send not interested if we were.
*
* This is called from several places:
*<pre>
* By getOutstandingRequest() when the first part of a chunk comes in
* By havePiece() when somebody got a new piece completed
* By chokeMessage() when we receive an unchoke
* By setInteresting() when we are now interested
* By PeerCoordinator.updatePiecePriorities()
*</pre>
*/
private void addRequest()
void addRequest()
{
boolean more_pieces = true;
while (more_pieces)
{
synchronized(this)
{
more_pieces = outstandingRequests.size() < MAX_PIPELINE;
}
// We want something and we don't have outstanding requests?
if (more_pieces && lastRequest == null)
more_pieces = requestNextPiece();
else if (more_pieces) // We want something
// no bitfield yet? nothing to request then.
if (bitfield == null)
return;
if (metainfo == null)
return;
// Initial bw check. We do the actual accounting in PeerConnectionOut.
// Implement a simple AIMD slow-start on the request queue size with
// currentMaxPipeline counter.
// Avoid cross-peer deadlocks from PeerCoordinator, call this outside the lock
if (!bwListener.shouldRequest(peer, 0)) {
synchronized(this) {
// Due to changes elsewhere we can let this go down to zero now
currentMaxPipeline /= 2;
}
if (_log.shouldWarn())
_log.warn(peer + " throttle request, interesting? " + interesting + " choked? " + choked +
" reqq: " + outstandingRequests.size() + " maxp: " + currentMaxPipeline);
return;
}
synchronized(this) {
// adjust currentMaxPipeline
long rate = bwListener.getDownloadRate();
long limit = bwListener.getDownBWLimit();
if (rate < limit * 7 / 10) {
if (currentMaxPipeline < peer.getMaxPipeline())
currentMaxPipeline++;
} else if (rate > limit * 9 / 10) {
currentMaxPipeline = 1;
} else if (currentMaxPipeline < 2) {
currentMaxPipeline++;
}
boolean more_pieces = true;
while (more_pieces)
{
int pieceLength;
boolean isLastChunk;
synchronized(this)
{
pieceLength = metainfo.getPieceLength(lastRequest.piece);
isLastChunk = lastRequest.off + lastRequest.len == pieceLength;
more_pieces = outstandingRequests.size() < currentMaxPipeline;
// We want something and we don't have outstanding requests?
if (more_pieces && lastRequest == null) {
// we have nothing in the queue right now
if (!interesting) {
// If we need something, set interesting but delay pulling
// a request from the PeerCoordinator until unchoked.
if (listener.needPiece(this.peer, bitfield)) {
setInteresting(true);
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " addRequest() we need something, setting interesting, delaying requestNextPiece()");
} else {
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " addRequest() needs nothing");
}
return;
}
if (choked) {
// If choked, delay pulling
// a request from the PeerCoordinator until unchoked.
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " addRequest() we are choked, delaying requestNextPiece()");
return;
}
// Last part of a piece?
if (isLastChunk)
more_pieces = requestNextPiece();
else
} else if (more_pieces) // We want something
{
synchronized(this)
{
int nextPiece = lastRequest.piece;
int pieceLength;
boolean isLastChunk;
pieceLength = metainfo.getPieceLength(lastRequest.getPiece());
isLastChunk = lastRequest.off + lastRequest.len == pieceLength;
// Last part of a piece?
if (isLastChunk) {
more_pieces = requestNextPiece();
} else {
PartialPiece nextPiece = lastRequest.getPartialPiece();
int nextBegin = lastRequest.off + PARTSIZE;
byte[] bs = lastRequest.bs;
int maxLength = pieceLength - nextBegin;
int nextLength = maxLength > PARTSIZE ? PARTSIZE
: maxLength;
Request req
= new Request(nextPiece, bs, nextBegin, nextLength);
outstandingRequests.add(req);
if (!choked)
out.sendRequest(req);
lastRequest = req;
}
while (true) {
// don't rerequest chunks we already have
if (!nextPiece.hasChunk(nextBegin / PARTSIZE)) {
int maxLength = pieceLength - nextBegin;
int nextLength = maxLength > PARTSIZE ? PARTSIZE
: maxLength;
Request req = new Request(nextPiece,nextBegin, nextLength);
outstandingRequests.add(req);
if (!choked)
out.sendRequest(req);
lastRequest = req;
break;
} else {
nextBegin += PARTSIZE;
if (nextBegin >= pieceLength) {
more_pieces = requestNextPiece();
break;
}
}
}
}
}
}
}
// failsafe
// However this is bad as it thrashes the peer when we change our mind
// Ticket 691 cause here?
if (interesting && lastRequest == null && outstandingRequests.isEmpty())
setInteresting(false);
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " requests " + outstandingRequests);
}
// Starts requesting first chunk of next piece. Returns true if
// something has been added to the requests, false otherwise.
/**
* Starts requesting first chunk of next piece. Returns true if
* something has been added to the requests, false otherwise.
* Caller should synchronize.
*/
private boolean requestNextPiece()
{
// Check that we already know what the other side has.
if (bitfield != null)
{
int nextPiece = listener.wantPiece(peer, bitfield);
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " want piece " + nextPiece);
synchronized(this)
{
if (nextPiece != -1
&& (lastRequest == null || lastRequest.piece != nextPiece))
{
int piece_length = metainfo.getPieceLength(nextPiece);
byte[] bs = new byte[piece_length];
int length = Math.min(piece_length, PARTSIZE);
Request req = new Request(nextPiece, bs, 0, length);
outstandingRequests.add(req);
if (bitfield != null) {
// Check for adopting an orphaned partial piece
PartialPiece pp = listener.getPartialPiece(peer, bitfield);
if (pp != null) {
// Double-check that r not already in outstandingRequests
if (!getRequestedPieces().contains(Integer.valueOf(pp.getPiece()))) {
Request r = pp.getRequest();
outstandingRequests.add(r);
if (!choked)
out.sendRequest(req);
lastRequest = req;
out.sendRequest(r);
lastRequest = r;
return true;
}
}
}
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Got dup from coord: " + pp);
pp.release();
}
}
}
// failsafe
// However this is bad as it thrashes the peer when we change our mind
// Ticket 691 cause here?
if (outstandingRequests.isEmpty())
lastRequest = null;
// If we are not in the end game, we may run out of things to request
// because we are asking other peers. Set not-interesting now rather than
// wait for those other requests to be satisfied via havePiece()
if (interesting && lastRequest == null) {
interesting = false;
out.sendInterest(false);
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " nothing more to request, now uninteresting");
}
return false;
}
synchronized void setInteresting(boolean interest)
{
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " setInteresting(" + interest + ")");
if (interest != interesting)
{
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " setInteresting(" + interest + ")");
interesting = interest;
out.sendInterest(interest);
if (interesting && !choked)
request();
request(true); // we shouldnt have any pending requests, but if we do, resend them
}
}
synchronized void setChoking(boolean choke)
{
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " setChoking(" + choke + ")");
if (choking != choke)
{
if (_log.shouldLog(Log.DEBUG))
_log.debug(peer + " setChoking(" + choke + ")");
choking = choke;
out.sendChoke(choke);
}
}
/** Send a keep-alive message to the peer via the output connection. */
void keepAlive()
{
    out.sendAlive();
}
/**
 * Resend our outstanding requests, but only while we still want data
 * (interesting) and the peer is not choking us.
 */
synchronized void retransmitRequests()
{
    if (interesting && !choked)
        out.retransmitRequests(outstandingRequests);
}
/**
 * debug
 * @return the outstanding requests rendered as a string, or null if none
 * @since 0.8.1
 */
synchronized String getRequests() {
    return outstandingRequests.isEmpty() ? null : outstandingRequests.toString();
}
}
package org.klomp.snark;
import java.util.Set;
import java.util.HashSet;
import java.util.Collections;
import java.util.Set;
public class Piece implements Comparable {
/**
* This class is used solely by PeerCoordinator.
* Caller must synchronize on many of these methods.
*/
class Piece implements Comparable<Piece> {
private int id;
private Set peers;
private boolean requested;
private final int id;
private final Set<PeerID> peers;
/** @since 0.8.3 */
private volatile Set<PeerID> requests;
/** @since 0.8.1 */
private int priority;
public Piece(int id) {
this.id = id;
this.peers = Collections.synchronizedSet(new HashSet());
this.requested = false;
this.peers = new HashSet<PeerID>(I2PSnarkUtil.MAX_CONNECTIONS / 2);
// defer creating requests to save memory
}
public int compareTo(Object o) throws ClassCastException {
return this.peers.size() - ((Piece)o).peers.size();
/**
* Highest priority first,
* then rarest first
*/
public int compareTo(Piece op) {
int pdiff = op.priority - this.priority; // reverse
if (pdiff != 0)
return pdiff;
return this.peers.size() - op.peers.size();
}
@Override
public boolean equals(Object o) {
if (o == null) return false;
try {
if (o instanceof Piece) {
return this.id == ((Piece)o).id;
} catch (ClassCastException cce) {
return false;
}
return false;
}
/** Hash solely on the piece id, consistent with equals(). */
@Override
public int hashCode() {
    // Same value as the classic accumulator form: hash = 5; hash = 31*hash + id
    return 31 * 5 + this.id;
}
/** @return the piece number */
public int getId() { return this.id; }

/** @return the live set of peer IDs known to have this piece (not a copy) */
public Set getPeers() { return this.peers; }

/**
 * Record that this peer has the piece.
 * Caller must synchronize.
 * @return true if the peer was not already present
 */
public boolean addPeer(Peer peer) { return this.peers.add(peer.getPeerID()); }

/**
 * Forget that this peer has the piece.
 * Caller must synchronize.
 * @return true if removed
 */
public boolean removePeer(Peer peer) { return this.peers.remove(peer.getPeerID()); }
// NOTE(review): the two lines below and the isRequested() further down
// look like two versions left in side by side -- isRequested() is defined
// twice and the boolean 'requested' field does not coexist with the
// 'requests' set; confirm which version is intended.
public boolean isRequested() { return this.requested; }
public void setRequested(boolean requested) { this.requested = requested; }

/**
 * How many peers have this piece?
 * Caller must synchronize
 * @since 0.9.1
 */
public int getPeerCount() {
    return this.peers.size();
}

/**
 * Is any peer currently requesting this piece?
 * Caller must synchronize.
 */
public boolean isRequested() {
    return this.requests != null && !this.requests.isEmpty();
}
/**
 * Since 0.8.3, keep track of who is requesting here,
 * to avoid deadlocks from querying each peer.
 * Caller must synchronize
 *
 * @param peer whose request state changed
 * @param requested true to record the peer as requesting, false to clear it
 */
public void setRequested(Peer peer, boolean requested) {
    PeerID pid = peer.getPeerID();
    if (!requested) {
        // nothing to clear if the request set was never created
        if (this.requests != null)
            this.requests.remove(pid);
        return;
    }
    // the set is created lazily to save memory
    if (this.requests == null)
        this.requests = new HashSet<PeerID>(2);
    this.requests.add(pid);
}
/**
 * Is peer requesting this piece?
 * Caller must synchronize
 * @since 0.8.3
 */
public boolean isRequestedBy(Peer peer) {
    // snapshot the lazily-created set once before using it
    Set<PeerID> reqs = this.requests;
    if (reqs == null)
        return false;
    return reqs.contains(peer.getPeerID());
}
/**
 * How many peers are requesting this piece?
 * Caller must synchronize
 * @since 0.8.3
 */
public int getRequestCount() {
    // the request set is created lazily, so null means zero
    Set<PeerID> reqs = this.requests;
    if (reqs == null)
        return 0;
    return reqs.size();
}
/**
 * Clear all knowledge of peers, both the "have" set and
 * the request tracking set (if it was ever created).
 * Caller must synchronize
 * @since 0.9.3
 */
public void clear() {
    this.peers.clear();
    Set<PeerID> reqs = this.requests;
    if (reqs != null)
        reqs.clear();
}
/** @return the priority, default 0; negative means disabled @since 0.8.1 */
public int getPriority() { return this.priority; }

/** Set the priority; higher values sort first in compareTo(). @since 0.8.1 */
public void setPriority(int p) { this.priority = p; }

/** @return true if the piece is disabled (negative priority) @since 0.8.1 */
public boolean isDisabled() { return this.priority < 0; }

/** Disable the piece by forcing its priority negative. @since 0.8.1 */
public void setDisabled() { this.priority = -1; }
/** @return the piece number rendered as a decimal string */
@Override
public String toString() {
    // Integer.toString(int) yields exactly the same text as String.valueOf(int).
    return Integer.toString(id);
}
......
......@@ -20,54 +20,103 @@
package org.klomp.snark;
import java.io.DataInputStream;
import java.io.IOException;
/**
* Holds all information needed for a partial piece request.
* This class should be used only by PeerState, PeerConnectionIn, and PeerConnectionOut.
*/
class Request
{
final int piece;
final byte[] bs;
private final PartialPiece piece;
final int off;
final int len;
long sendTime;
/**
* Creates a new Request.
*
* @param piece Piece number requested.
* @param bs byte array where response should be stored.
* @param off the offset in the array.
* @param len the number of bytes requested.
*/
Request(int piece, byte[] bs, int off, int len)
Request(PartialPiece piece, int off, int len)
{
this.piece = piece;
this.bs = bs;
this.off = off;
this.len = len;
// Sanity check
if (piece < 0 || off < 0 || len <= 0 || off + len > bs.length)
if (off < 0 || len <= 0 || off + len > piece.getLength())
throw new IndexOutOfBoundsException("Illegal Request " + toString());
}
/**
 * Dummy Request for PeerState.returnPartialPieces().
 * len will be zero.
 *
 * @param piece the PartialPiece this dummy request refers to
 *              (not just a piece number)
 * @param off the offset in the piece; must be in [0, piece.getLength()]
 * @throws IndexOutOfBoundsException if off is out of range
 * @since 0.9.36
 */
Request(PartialPiece piece, int off)
{
    this.piece = piece;
    this.off = off;
    this.len = 0;

    // Sanity check
    if (off < 0 || off > piece.getLength())
        throw new IndexOutOfBoundsException("Illegal Request " + toString());
}
/**
 * Read this request's chunk (off/len) from the stream into the
 * underlying PartialPiece, passing the bandwidth listener through
 * to PartialPiece.read().
 *
 * @throws IOException on read error
 * @since 0.9.1
 */
public void read(DataInputStream din, BandwidthListener bwl) throws IOException {
    piece.read(din, off, len, bwl);
}

/**
 * The piece number this Request is for
 *
 * @since 0.9.1
 */
public int getPiece() {
    return piece.getPiece();
}

/**
 * The PartialPiece this Request is for
 *
 * @since 0.9.1
 */
public PartialPiece getPartialPiece() {
    return piece;
}
@Override
public int hashCode()
{
return piece ^ off ^ len;
return piece.getPiece() ^ off ^ len;
}
@Override
public boolean equals(Object o)
{
if (o instanceof Request)
{
Request req = (Request)o;
return req.piece == piece && req.off == off && req.len == len;
return req.piece.equals(piece) && req.off == off && req.len == len;
}
return false;
}
@Override
public String toString()
{
return "(" + piece + "," + off + "," + len + ")";
return "(" + piece.getPiece() + "," + off + "," + len + ")";
}
}
......@@ -20,16 +20,25 @@
package org.klomp.snark;
import java.io.*;
import java.net.*;
import java.util.*;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.StringTokenizer;
import java.util.concurrent.atomic.AtomicInteger;
import org.klomp.snark.bencode.*;
import net.i2p.data.Destination;
import net.i2p.client.streaming.I2PSocket;
import net.i2p.I2PAppContext;
import net.i2p.client.streaming.I2PServerSocket;
import net.i2p.util.I2PThread;
import net.i2p.data.Destination;
import net.i2p.util.Log;
import net.i2p.util.SecureFile;
import org.klomp.snark.comments.Comment;
import org.klomp.snark.comments.CommentSet;
/**
* Main Snark program startup class.
......@@ -42,34 +51,12 @@ public class Snark
private final static int MIN_PORT = 6881;
private final static int MAX_PORT = 6889;
// Error messages (non-fatal)
public final static int ERROR = 1;
// Warning messages
public final static int WARNING = 2;
// Notices (peer level)
public final static int NOTICE = 3;
// Info messages (protocol policy level)
public final static int INFO = 4;
// Debug info (protocol level)
public final static int DEBUG = 5;
// Very low level stuff (network level)
public final static int ALL = 6;
/**
* What level of debug info to show.
*/
public static int debug = NOTICE;
// Whether or not to ask the user for commands while sharing
private static boolean command_interpreter = true;
//private static boolean command_interpreter = true;
private static final String newline = System.getProperty("line.separator");
/****
private static final String copyright =
"The Hunting of the Snark Project - Copyright (C) 2003 Mark J. Wielaard"
+ newline + newline
......@@ -85,23 +72,26 @@ public class Snark
"Press return for help. Type \"quit\" and return to stop.";
private static final String help =
"Commands: 'info', 'list', 'quit'.";
****/
// String indicating main activity
String activity = "Not started";
/****
private static class OOMListener implements I2PThread.OOMEventListener {
public void outOfMemory(OutOfMemoryError err) {
try {
err.printStackTrace();
I2PSnarkUtil.instance().debug("OOM in the snark", Snark.ERROR, err);
System.out.println("OOM in the snark" + err);
} catch (Throwable t) {
System.out.println("OOM in the OOM");
}
System.exit(0);
//System.exit(0);
}
}
****/
/******** No, not maintaining a command-line client
public static void main(String[] args)
{
System.out.println(copyright);
......@@ -216,96 +206,180 @@ public class Snark
}
catch(IOException ioe)
{
debug("ERROR while reading stdin: " + ioe, ERROR);
System.out.println("ERROR while reading stdin: " + ioe);
}
// Explicit shutdown.
Runtime.getRuntime().removeShutdownHook(snarkhook);
//Runtime.getRuntime().removeShutdownHook(snarkhook);
snarkhook.start();
}
}
public String torrent;
public MetaInfo meta;
public Storage storage;
public PeerCoordinator coordinator;
public ConnectionAcceptor acceptor;
public TrackerClient trackerclient;
public String rootDataDir = ".";
public CompleteListener completeListener;
public boolean stopped;
***********/
/** max connections */
public static final String PROP_MAX_CONNECTIONS = "i2psnark.maxConnections";
Snark(String torrent, String ip, int user_port,
/** most of these used to be public, use accessors below instead */
private String torrent;
private MetaInfo meta;
private Storage storage;
private PeerCoordinator coordinator;
private ConnectionAcceptor acceptor;
private TrackerClient trackerclient;
private final File rootDataDir;
private final CompleteListener completeListener;
private volatile boolean stopped;
private volatile boolean starting;
private final byte[] id;
private final byte[] infoHash;
private String additionalTrackerURL;
protected final I2PSnarkUtil _util;
private final Log _log;
private final PeerCoordinatorSet _peerCoordinatorSet;
private volatile String trackerProblems;
private volatile int trackerSeenPeers;
private volatile boolean _autoStoppable;
// String indicating main activity
private volatile String activity = "Not started";
private long savedUploaded;
private long _startedTime;
private CommentSet _comments;
private final Object _commentLock = new Object();
private static final AtomicInteger __RPCID = new AtomicInteger();
private final int _rpcID = __RPCID.incrementAndGet();
/**
* from main() via parseArguments() single torrent
*
* unused
*/
/****
Snark(I2PSnarkUtil util, String torrent, String ip, int user_port,
StorageListener slistener, CoordinatorListener clistener) {
this(torrent, ip, user_port, slistener, clistener, true, ".");
this(util, torrent, ip, user_port, slistener, clistener, null, null, null, true, ".");
}
****/
/**
* single torrent - via router
*
* unused
*/
/****
public Snark(I2PAppContext ctx, Properties opts, String torrent,
StorageListener slistener, boolean start, String rootDir) {
this(new I2PSnarkUtil(ctx), torrent, null, -1, slistener, null, null, null, null, false, rootDir);
String host = opts.getProperty("i2cp.hostname");
int port = 0;
String s = opts.getProperty("i2cp.port");
if (s != null) {
try {
port = Integer.parseInt(s);
} catch (NumberFormatException nfe) {}
}
_util.setI2CPConfig(host, port, opts);
s = opts.getProperty(SnarkManager.PROP_UPBW_MAX);
if (s != null) {
try {
int v = Integer.parseInt(s);
_util.setMaxUpBW(v);
} catch (NumberFormatException nfe) {}
}
s = opts.getProperty(PROP_MAX_CONNECTIONS);
if (s != null) {
try {
int v = Integer.parseInt(s);
_util.setMaxConnections(v);
} catch (NumberFormatException nfe) {}
}
if (start)
this.startTorrent();
}
****/
/**
* multitorrent
*
* Will not start itself. Caller must call startTorrent() if desired.
*
* @throws RuntimeException via fatal()
* @throws RouterException via fatalRouter()
*/
public Snark(I2PSnarkUtil util, String torrent, String ip, int user_port,
StorageListener slistener, CoordinatorListener clistener,
CompleteListener complistener, PeerCoordinatorSet peerCoordinatorSet,
ConnectionAcceptor connectionAcceptor, String rootDir)
{
this(util, torrent, ip, user_port, slistener, clistener, complistener,
peerCoordinatorSet, connectionAcceptor, rootDir, null);
}
Snark(String torrent, String ip, int user_port,
StorageListener slistener, CoordinatorListener clistener, boolean start, String rootDir)
/**
* multitorrent
*
* Will not start itself. Caller must call startTorrent() if desired.
*
* @param baseFile if null, use rootDir/torrentName; if non-null, use it instead
* @throws RuntimeException via fatal()
* @throws RouterException via fatalRouter()
* @since 0.9.11
*/
public Snark(I2PSnarkUtil util, String torrent, String ip, int user_port,
StorageListener slistener, CoordinatorListener clistener,
CompleteListener complistener, PeerCoordinatorSet peerCoordinatorSet,
ConnectionAcceptor connectionAcceptor, String rootDir, File baseFile)
{
if (slistener == null)
slistener = this;
if (clistener == null)
clistener = this;
completeListener = complistener;
_util = util;
_log = util.getContext().logManager().getLog(Snark.class);
_peerCoordinatorSet = peerCoordinatorSet;
acceptor = connectionAcceptor;
this.torrent = torrent;
this.rootDataDir = rootDir;
this.rootDataDir = new File(rootDir);
stopped = true;
activity = "Network setup";
// "Taking Three as the subject to reason about--
// A convenient number to state--
// We add Seven, and Ten, and then multiply out
// By One Thousand diminished by Eight.
//
// "The result we proceed to divide, as you see,
// By Nine Hundred and Ninety Two:
// Then subtract Seventeen, and the answer must be
// Exactly and perfectly true.
id = generateID();
if (_log.shouldLog(Log.INFO))
_log.info("My peer id: " + PeerID.idencode(id));
// Create a new ID and fill it with something random. First nine
// zeros bytes, then three bytes filled with snark and then
// sixteen random bytes.
byte snark = (((3 + 7 + 10) * (1000 - 8)) / 992) - 17;
byte[] id = new byte[20];
Random random = new Random();
int i;
for (i = 0; i < 9; i++)
id[i] = 0;
id[i++] = snark;
id[i++] = snark;
id[i++] = snark;
while (i < 20)
id[i++] = (byte)random.nextInt(256);
Snark.debug("My peer id: " + PeerID.idencode(id), Snark.INFO);
int port;
IOException lastException = null;
boolean ok = I2PSnarkUtil.instance().connect();
/*
* Don't start a tunnel if the torrent isn't going to be started.
* If we are starting,
* startTorrent() will force a connect.
*
boolean ok = util.connect();
if (!ok) fatal("Unable to connect to I2P");
I2PServerSocket serversocket = I2PSnarkUtil.instance().getServerSocket();
I2PServerSocket serversocket = util.getServerSocket();
if (serversocket == null)
fatal("Unable to listen for I2P connections");
else {
Destination d = serversocket.getManager().getSession().getMyDestination();
debug("Listening on I2P destination " + d.toBase64() + " / " + d.calculateHash().toBase64(), NOTICE);
}
*/
// Figure out what the torrent argument represents.
meta = null;
File f = null;
InputStream in = null;
byte[] x_infoHash = null;
try
{
InputStream in = null;
f = new File(torrent);
if (f.exists())
in = new FileInputStream(f);
else
{
/**** No, we don't ever fetch a torrent file this way
and we don't want to block in the constructor
activity = "Getting torrent";
File torrentFile = I2PSnarkUtil.instance().get(torrent);
File torrentFile = _util.get(torrent, 3);
if (torrentFile == null) {
fatal("Unable to fetch " + torrent);
if (false) return; // never reached - fatal(..) throws
......@@ -313,8 +387,11 @@ public class Snark
torrentFile.deleteOnExit();
in = new FileInputStream(torrentFile);
}
*****/
throw new IOException("not found");
}
meta = new MetaInfo(new BDecoder(in));
meta = new MetaInfo(in);
x_infoHash = meta.getInfoHash();
}
catch(IOException ioe)
{
......@@ -324,12 +401,12 @@ public class Snark
fatal("'" + torrent + "' exists,"
+ " but is not a valid torrent metainfo file."
+ System.getProperty("line.separator"), ioe);
else
else
fatal("I2PSnark does not support creating and tracking a torrent at the moment");
/*
{
// Try to create a new metainfo file
Snark.debug
debug
("Trying to create metainfo torrent for '" + torrent + "'",
NOTICE);
try
......@@ -348,9 +425,16 @@ public class Snark
*/
else
fatal("Cannot open '" + torrent + "'", ioe);
}
debug(meta.toString(), INFO);
} catch (OutOfMemoryError oom) {
fatalRouter("ERROR - Out of memory, cannot create torrent " + torrent + ": " + oom.getMessage(), oom);
} finally {
if (in != null)
try { in.close(); } catch (IOException ioe) {}
}
infoHash = x_infoHash; // final
if (_log.shouldLog(Log.INFO))
_log.info(meta.toString());
// When the metainfo torrent was created from an existing file/dir
// it already exists.
......@@ -359,84 +443,639 @@ public class Snark
try
{
activity = "Checking storage";
storage = new Storage(meta, slistener);
storage.check(rootDataDir);
boolean shouldPreserve = completeListener != null && completeListener.getSavedPreserveNamesSetting(this);
if (baseFile == null) {
String base = meta.getName();
if (!shouldPreserve)
base = Storage.filterName(base);
if (_util.getFilesPublic())
baseFile = new File(rootDataDir, base);
else
baseFile = new SecureFile(rootDataDir, base);
}
storage = new Storage(_util, baseFile, meta, slistener, shouldPreserve);
if (completeListener != null) {
storage.check(completeListener.getSavedTorrentTime(this),
completeListener.getSavedTorrentBitField(this));
} else {
storage.check();
}
// have to figure out when to reopen
// if (!start)
// storage.close();
}
catch (IOException ioe)
{
try { storage.close(); } catch (IOException ioee) {
ioee.printStackTrace();
}
fatal("Could not create storage", ioe);
fatal("Could not check or create files for " + getBaseInfo(), ioe);
}
}
/*
* see comment above
*
activity = "Collecting pieces";
coordinator = new PeerCoordinator(id, meta, storage, clistener, this);
PeerCoordinatorSet set = PeerCoordinatorSet.instance();
set.add(coordinator);
ConnectionAcceptor acceptor = ConnectionAcceptor.instance();
acceptor.startAccepting(set, serversocket);
trackerclient = new TrackerClient(meta, coordinator);
if (start)
startTorrent();
*/
savedUploaded = (completeListener != null) ? completeListener.getSavedUploaded(this) : 0;
if (completeListener != null)
_comments = completeListener.getSavedComments(this);
}
/**
* multitorrent, magnet, Used by snark-rpc plugin
*
* Will not start itself. Caller must call startTorrent() if desired.
*
* @param ignored used to be autostart
* @throws RuntimeException via fatal()
* @throws RouterException via fatalRouter()
* @since 0.8.4, removed in 0.9.36, restored in 0.9.45 with boolean param now ignored
*/
protected Snark(I2PSnarkUtil util, String torrent, byte[] ih, String trackerURL,
CompleteListener complistener, PeerCoordinatorSet peerCoordinatorSet,
ConnectionAcceptor connectionAcceptor, boolean ignored, String rootDir) {
this(util, torrent, ih, trackerURL, complistener, peerCoordinatorSet, connectionAcceptor, rootDir);
}
/**
* Start up contacting peers and querying the tracker
* multitorrent, magnet
*
* Will not start itself. Caller must call startTorrent() if desired.
*
* @param torrent a fake name for now (not a file name)
* @param ih 20-byte info hash
* @param trackerURL may be null
* @throws RuntimeException via fatal()
* @throws RouterException via fatalRouter()
* @since 0.8.4
*/
public void startTorrent() {
public Snark(I2PSnarkUtil util, String torrent, byte[] ih, String trackerURL,
CompleteListener complistener, PeerCoordinatorSet peerCoordinatorSet,
ConnectionAcceptor connectionAcceptor, String rootDir)
{
completeListener = complistener;
_util = util;
_log = util.getContext().logManager().getLog(Snark.class);
_peerCoordinatorSet = peerCoordinatorSet;
acceptor = connectionAcceptor;
this.torrent = torrent;
this.infoHash = ih;
this.additionalTrackerURL = trackerURL;
this.rootDataDir = rootDir != null ? new File(rootDir) : null; // null only for FetchAndAdd extension
savedUploaded = 0;
stopped = true;
id = generateID();
// All we have is an infoHash
// meta remains null
// storage remains null
}
/**
 * Build our 20-byte peer ID: nine zero bytes, three "snark" marker
 * bytes, then eight random bytes.
 *
 * @throws RouterException if the global RNG has already been shut down
 */
private static byte[] generateID() {
    // "Taking Three as the subject to reason about--
    // A convenient number to state--
    // We add Seven, and Ten, and then multiply out
    // By One Thousand diminished by Eight.
    //
    // "The result we proceed to divide, as you see,
    // By Nine Hundred and Ninety Two:
    // Then subtract Seventeen, and the answer must be
    // Exactly and perfectly true.
    final byte snark = (((3 + 7 + 10) * (1000 - 8)) / 992) - 17;
    byte[] id = new byte[20];
    // bytes 0-8 stay zero; bytes 9-11 carry the snark marker
    for (int i = 9; i <= 11; i++)
        id[i] = snark;
    try {
        // fill bytes 12-19 with randomness
        I2PAppContext.getGlobalContext().random().nextBytes(id, 12, 8);
    } catch (IllegalStateException ise) {
        // random is shut down
        throw new RouterException("Router shutdown", ise);
    }
    return id;
}
/**
 * Start up contacting peers and querying the tracker.
 * Blocks if tunnel is not yet open.
 * No-op if the torrent is already running.
 *
 * @throws RuntimeException via fatal()
 * @throws RouterException via fatalRouter()
 */
public synchronized void startTorrent() {
    // already running
    if (!stopped)
        return;
    starting = true;
    try {
        x_startTorrent();
        // only recorded after a successful start (skipped if x_startTorrent throws)
        _startedTime = _util.getContext().clock().now();
    } finally {
        // always clear the in-progress flag, even on failure
        starting = false;
    }
}
/**
 * Connect to I2P, create the PeerCoordinator / ConnectionAcceptor /
 * TrackerClient on first start, and (re)start them on subsequent starts.
 * Called only from startTorrent().
 *
 * @throws RuntimeException via fatal()
 * @throws RouterException via fatalRouter()
 */
private void x_startTorrent() {
    boolean ok = _util.connect();
    if (!ok) {
        // different message for embedded (router context) vs. standalone I2CP failure
        if (_util.getContext().isRouterContext())
            fatalRouter(_util.getString("Unable to connect to I2P"), null);
        else
            fatalRouter(_util.getString("Error connecting to I2P - check your I2CP settings!") + ' ' + _util.getI2CPHost() + ':' + _util.getI2CPPort(), null);
    }
    if (coordinator == null) {
        // first start: build coordinator, acceptor (single-torrent only) and tracker client
        I2PServerSocket serversocket = _util.getServerSocket();
        if (serversocket == null)
            fatalRouter("Unable to listen for I2P connections", null);
        else {
            Destination d = serversocket.getManager().getSession().getMyDestination();
            if (_log.shouldLog(Log.INFO))
                _log.info("Listening on I2P destination " + d.toBase64() + " / " + d.calculateHash().toBase64());
        }
        if (_log.shouldLog(Log.INFO))
            _log.info("Starting PeerCoordinator, ConnectionAcceptor, and TrackerClient");
        activity = "Collecting pieces";
        coordinator = new PeerCoordinator(_util, id, infoHash, meta, storage, this, this, completeListener.getBandwidthListener());
        coordinator.setUploaded(savedUploaded);
        if (_peerCoordinatorSet != null) {
            // multitorrent
            _peerCoordinatorSet.add(coordinator);
        } else {
            // single torrent
            acceptor = new ConnectionAcceptor(_util, new PeerAcceptor(coordinator));
        }
        // TODO pass saved closest DHT nodes to the tracker? or direct to the coordinator?
        trackerclient = new TrackerClient(_util, meta, additionalTrackerURL, coordinator, this);
    }
    // ensure acceptor is running when in multitorrent
    if (_peerCoordinatorSet != null && acceptor != null) {
        acceptor.startAccepting();
    }
    stopped = false;
    boolean coordinatorChanged = false;
    // NOTE(review): the restart logic below appears to contain both the old and
    // new sides of a merge; verify which branches are intended to survive.
    if (coordinator.halted()) {
        // ok, we have already started and stopped, but the coordinator seems a bit annoying to
        // restart safely, so lets build a new one to replace the old
        // NOTE(review): uses static PeerCoordinatorSet.instance() while the code
        // above uses the injected _peerCoordinatorSet — confirm which is intended
        PeerCoordinatorSet set = PeerCoordinatorSet.instance();
        set.remove(coordinator);
        PeerCoordinator newCoord = new PeerCoordinator(coordinator.getID(), coordinator.getMetaInfo(),
                                                       coordinator.getStorage(), coordinator.getListener(), this);
        set.add(newCoord);
        coordinator = newCoord;
        coordinatorChanged = true;
    }
    if (!trackerclient.started() && !coordinatorChanged) {
        coordinator.restart();
        if (_peerCoordinatorSet != null)
            _peerCoordinatorSet.add(coordinator);
    }
    if (!trackerclient.started()) {
        trackerclient.start();
    } else if (trackerclient.halted() || coordinatorChanged) {
        // NOTE(review): this 2-arg TrackerClient constructor differs from the
        // 5-arg one used above — looks like an older API; confirm it still exists
        TrackerClient newClient = new TrackerClient(coordinator.getMetaInfo(), coordinator);
        if (!trackerclient.halted())
            trackerclient.halt();
        trackerclient = newClient;
    } else if (trackerclient.halted()) {
        // NOTE(review): unreachable — the previous branch already matched
        // trackerclient.halted(); almost certainly diff-merge residue
        if (storage != null) {
            try {
                storage.reopen();
            } catch (IOException ioe) {
                // reopen failed: close and abort rather than run with bad storage
                try { storage.close(); } catch (IOException ioee) {
                    ioee.printStackTrace();
                }
                fatal("Could not open file for " + getBaseInfo(), ioe);
            }
        }
        trackerclient.start();
    } else {
        if (_log.shouldLog(Log.INFO))
            _log.info("NOT starting TrackerClient???");
    }
}
/**
 * Stop contacting the tracker and talking with peers
 */
public void stopTorrent() {
    // Delegate to the full version; stopTorrent(false) sets the stopped
    // flag itself (see the !fast branch there), so the extra assignment
    // that used to be here was redundant merge residue.
    stopTorrent(false);
}
/**
 * Stop contacting the tracker and talking with peers
 * @param fast if true, limit the life of the unannounce threads
 * @since 0.9.1
 */
public synchronized void stopTorrent(boolean fast) {
    TrackerClient tc = trackerclient;
    if (tc != null)
        tc.halt(fast);      // was: tc.halt() inside the check AND an unguarded
                            // tc.halt(fast) after it — NPE risk + double halt
    PeerCoordinator pc = coordinator;
    if (pc != null)
        pc.halt();
    Storage st = storage;
    if (!fast)
        // HACK: Needed a way to distinguish between user-stop and
        // shutdown-stop. stopTorrent(true) is in stopAllTorrents().
        // (#766)
        stopped = true;
    if (st != null) {
        // TODO: Cache the config-in-mem to compare vs config-on-disk
        // (needed for auto-save to not double-save in some cases)
        long nowUploaded = getUploaded();
        // If autoStart is enabled, always save the config, so we know
        // whether to start it up next time
        boolean changed = st.isChanged() || nowUploaded != savedUploaded ||
                          (completeListener != null && completeListener.shouldAutoStart());
        try {
            // use the local snapshot consistently (the field could change under us)
            st.close();
        } catch (IOException ioe) {
            System.out.println("Error closing " + torrent);
            if (_log.shouldWarn())
                _log.warn("Error closing " + torrent);
            ioe.printStackTrace();
        }
        savedUploaded = nowUploaded;
        // SnarkManager.stopAllTorrents() will save comments at shutdown even if never started...
        if (completeListener != null) {
            if (changed)
                completeListener.updateStatus(this);
            synchronized(_commentLock) {
                if (_comments != null) {
                    synchronized(_comments) {
                        if (_comments.isModified())
                            completeListener.locked_saveComments(this, _comments);
                    }
                }
            }
        }
    }
    if (fast)
        // HACK: See above if(!fast)
        stopped = true;
    // remove from the multitorrent set (the old static
    // PeerCoordinatorSet.instance().remove(pc) duplicate was merge residue)
    if (pc != null && _peerCoordinatorSet != null)
        _peerCoordinatorSet.remove(pc);
    if (_peerCoordinatorSet == null)
        _util.disconnect();
}
// The old one-arg entry point is fully commented out; the dangling
// uncommented signature that preceded it was merge residue and broke compilation.
/****
private static Snark parseArguments(String[] args)
{
    return parseArguments(args, null, null);
}
****/
// Accessors

/**
 * The name this Snark was constructed with; not derived from the metainfo.
 * @return file name of .torrent file (should be full absolute path), or a fake name if in magnet mode.
 * @since 0.8.4
 */
public String getName() {
    return torrent;
}
/**
 * The display/base name for this torrent.
 * @return base name of torrent [filtered version of getMetaInfo.getName()], or a fake name if in magnet mode
 * @since 0.8.4
 */
public String getBaseName() {
    Storage s = storage;
    return (s != null) ? s.getBaseName() : torrent;
}
/**
 * @return base name for torrent [filtered version of getMetaInfo.getName()],
 *         or a fake name if in magnet mode, followed by path info and error message,
 *         for error logging only
 * @since 0.9.44
 */
private String getBaseInfo() {
    Storage s = storage;
    if (s == null)
        return torrent;
    return s.getBaseName() + " at " +
           s.getBase() + " - check that device is present and writable";
}
/**
 * This torrent's peer ID.
 * @return always will be valid even in magnet mode
 * @since 0.8.4
 */
public byte[] getID() {
    return id;
}
/**
 * The info hash, preferring the metainfo's copy when we have one.
 * @return always will be valid even in magnet mode
 * @since 0.8.4
 */
public byte[] getInfoHash() {
    // should always be the same
    MetaInfo m = meta;
    return (m != null) ? m.getInfoHash() : infoHash;
}
/**
 * The metainfo, once known.
 * @return may be null if in magnet mode
 * @since 0.8.4
 */
public MetaInfo getMetaInfo() {
    return meta;
}
/**
 * The backing storage for this torrent's data.
 * @return may be null if in magnet mode
 * @since 0.8.4
 */
public Storage getStorage() {
    return storage;
}
/**
 * Whether the torrent is currently stopped (see stopTorrent()).
 * @since 0.8.4
 */
public boolean isStopped() {
    return stopped;
}
/**
 * Startup in progress.
 * True only while startTorrent() is running and the torrent is still
 * marked stopped.
 * @since 0.9.1
 */
public boolean isStarting() {
    return starting && stopped;
}
/**
 * Set startup in progress.
 * Cleared automatically when startTorrent() completes.
 * @since 0.9.1
 */
public void setStarting() {
    starting = true;
}
/**
 * File checking in progress.
 * @since 0.9.3
 */
public boolean isChecking() {
    Storage s = storage;
    return s != null && s.isChecking();
}
/**
 * If checking is in progress, return completion 0.0 ... 1.0,
 * else return 1.0.
 * @since 0.9.23
 */
public double getCheckingProgress() {
    Storage s = storage;
    return (s != null && s.isChecking()) ? s.getCheckingProgress() : 1.0d;
}
/**
 * Disk allocation (ballooning) in progress.
 * @since 0.9.3
 */
public boolean isAllocating() {
    Storage s = storage;
    return s != null && s.isAllocating();
}
/**
 * Current download rate per the coordinator.
 * @return 0 when no coordinator exists
 * @since 0.8.4
 */
public long getDownloadRate() {
    PeerCoordinator c = coordinator;
    return (c == null) ? 0 : c.getDownloadRate();
}
/**
 * Current upload rate per the coordinator.
 * @return 0 when no coordinator exists
 * @since 0.8.4
 */
public long getUploadRate() {
    PeerCoordinator c = coordinator;
    return (c == null) ? 0 : c.getUploadRate();
}
/**
 * Total downloaded per the coordinator.
 * @return 0 when no coordinator exists
 * @since 0.8.4
 */
public long getDownloaded() {
    PeerCoordinator c = coordinator;
    return (c == null) ? 0 : c.getDownloaded();
}
/**
 * Total uploaded per the coordinator, falling back to the value saved
 * at the last stop when no coordinator exists.
 * @since 0.8.4
 */
public long getUploaded() {
    PeerCoordinator c = coordinator;
    return (c == null) ? savedUploaded : c.getUploaded();
}
/**
 * Connected peer count per the coordinator.
 * @return 0 when no coordinator exists
 * @since 0.8.4
 */
public int getPeerCount() {
    PeerCoordinator c = coordinator;
    return (c == null) ? 0 : c.getPeerCount();
}
/**
 * The coordinator's current peers.
 * @return an empty list when no coordinator exists
 * @since 0.8.4
 */
public List<Peer> getPeerList() {
    PeerCoordinator c = coordinator;
    return (c == null) ? Collections.<Peer>emptyList() : c.peerList();
}
/**
 * Not HTML escaped.
 * @return String returned from tracker, or null if no error
 * @since 0.8.4
 */
public String getTrackerProblems() {
    return trackerProblems;
}
/**
 * Record the latest tracker error for display; pass null to clear it.
 * @param p tracker error string or null
 * @since 0.8.4
 */
public void setTrackerProblems(String p) {
    trackerProblems = p;
}
/**
 * @return count returned from tracker
 * @since 0.8.4
 */
public int getTrackerSeenPeers() {
    return trackerSeenPeers;
}
/**
 * Record the peer count most recently reported by the tracker.
 * @since 0.8.4
 */
public void setTrackerSeenPeers(int p) {
    trackerSeenPeers = p;
}
/**
 * Tell the coordinator (if any) to recompute piece priorities.
 * @since 0.8.4
 */
public void updatePiecePriorities() {
    PeerCoordinator c = coordinator;
    if (c == null)
        return;
    c.updatePiecePriorities();
}
/**
 * @return total of all torrent files, or total of metainfo file if fetching magnet, or -1
 * @since 0.8.4
 */
public long getTotalLength() {
    MetaInfo m = meta;
    // FIXME else return metainfo length if available
    return (m == null) ? -1 : m.getTotalLength();
}
/**
 * Bytes not yet in storage. Does NOT account for skipped files.
 * @return exact value. or -1 if no storage yet.
 * getNeeded() * pieceLength(0) isn't accurate if last piece
 * is still needed.
 * @since 0.8.9
 */
public long getRemainingLength() {
    if (meta != null && storage != null) {
        long needed = storage.needed();
        long length0 = meta.getPieceLength(0);
        // assume every needed piece is full-size...
        long remaining = needed * length0;
        // ...then correct for the (shorter) last piece if it is still needed
        int last = meta.getPieces() - 1;
        if (last != 0 && !storage.getBitField().get(last))
            remaining -= length0 - meta.getPieceLength(last);
        return remaining;
    }
    return -1;
}
/**
 * Bytes still wanted. DOES account for (i.e. does not include) skipped files.
 * FIXME -1 when not running.
 * @return exact value. or -1 if no storage yet or when not running.
 * @since 0.9.1
 */
public long getNeededLength() {
    PeerCoordinator c = coordinator;
    return (c == null) ? -1 : c.getNeededLength();
}
/**
 * Bytes not received and set to skipped.
 * This is not the same as the total of all skipped files,
 * since pieces may span multiple files.
 *
 * Uses the cheap remaining-minus-needed difference while running,
 * and asks storage directly when stopped.
 *
 * @return exact value. or 0 if no storage yet.
 * @since 0.9.24
 */
public long getSkippedLength() {
    PeerCoordinator coord = coordinator;
    if (coord != null) {
        // fast way
        long r = getRemainingLength();
        if (r <= 0)
            return 0;
        long n = coord.getNeededLength();
        return r - n;
    } else if (storage != null) {
        // slow way
        return storage.getSkippedLength();
    }
    return 0;
}
/**
 * Does not account (i.e. includes) for skipped files.
 * NOTE(review): units are inconsistent — returns a piece count when
 * storage exists, but total bytes in magnet mode (see FIXMEs below).
 * @return number of pieces still needed (magnet mode or not), or -1 if unknown
 * @since 0.8.4
 */
public long getNeeded() {
    if (storage != null)
        return storage.needed();
    if (meta != null)
        // FIXME subtract chunks we have
        return meta.getTotalLength();
    // FIXME fake
    return -1;
}
/**
 * @param p the piece number
 * @return metainfo piece length or 16K if fetching magnet
 * @since 0.8.4
 */
public int getPieceLength(int p) {
    MetaInfo m = meta;
    return (m == null) ? 16*1024 : m.getPieceLength(p);
}
/**
 * @return number of pieces
 * @since 0.8.4
 */
public int getPieces() {
    MetaInfo m = meta;
    // FIXME else return metainfo pieces if available
    return (m == null) ? -1 : m.getPieces();
}
/**
 * Restart the connection acceptor if we have one (single-torrent mode).
 * @return true if restarted
 * @since 0.8.4
 */
public boolean restartAcceptor() {
    ConnectionAcceptor a = acceptor;
    if (a == null)
        return false;
    a.restart();
    return true;
}
/**
 * @return trackerURL string from magnet-mode constructor, may be null
 * @since 0.8.4
 */
public String getTrackerURL() {
    return additionalTrackerURL;
}
/**
 * Whether this torrent may be stopped automatically when done.
 * @since 0.9.9
 */
public boolean isAutoStoppable() { return _autoStoppable; }
/**
 * Mark this torrent as eligible (or not) for automatic stopping.
 * @since 0.9.9
 */
public void setAutoStoppable(boolean yes) { _autoStoppable = yes; }
/**
* Sets debug, ip and torrent variables then creates a Snark
*
* non-valid argument list. The given listeners will be
* passed to all components that take one.
*/
// The three-arg version is fully commented out below; the dangling
// uncommented signature line that preceded the comment was merge residue
// and broke compilation.
/****
private static Snark parseArguments(String[] args,
                                    StorageListener slistener,
                                    CoordinatorListener clistener)
{
......@@ -452,11 +1092,14 @@ public class Snark
String ip = null;
String torrent = null;
boolean configured = I2PSnarkUtil.instance().configured();
I2PSnarkUtil util = new I2PSnarkUtil(I2PAppContext.getGlobalContext());
boolean configured = util.configured();
int i = 0;
while (i < args.length)
{
****/
/*
if (args[i].equals("--debug"))
{
debug = INFO;
......@@ -477,7 +1120,9 @@ public class Snark
catch (NumberFormatException nfe) { }
}
}
else if (args[i].equals("--port"))
else */
/****
if (args[i].equals("--port"))
{
if (args.length - 1 < i + 1)
usage("--port needs port number to listen on");
......@@ -493,17 +1138,17 @@ public class Snark
}
else if (args[i].equals("--no-commands"))
{
command_interpreter = false;
//command_interpreter = false;
i++;
}
else if (args[i].equals("--eepproxy"))
{
String proxyHost = args[i+1];
String proxyPort = args[i+2];
if (!configured)
I2PSnarkUtil.instance().setProxy(proxyHost, Integer.parseInt(proxyPort));
i += 3;
}
//else if (args[i].equals("--eepproxy"))
// {
// String proxyHost = args[i+1];
// String proxyPort = args[i+2];
// if (!configured)
// util.setProxy(proxyHost, Integer.parseInt(proxyPort));
// i += 3;
// }
else if (args[i].equals("--i2cp"))
{
String i2cpHost = args[i+1];
......@@ -523,7 +1168,7 @@ public class Snark
}
}
if (!configured)
I2PSnarkUtil.instance().setI2CPConfig(i2cpHost, Integer.parseInt(i2cpPort), opts);
util.setI2CPConfig(i2cpHost, Integer.parseInt(i2cpPort), opts);
i += 3 + (opts != null ? 1 : 0);
}
else
......@@ -540,7 +1185,7 @@ public class Snark
else
usage("Need exactly one <url>, <file> or <dir>.");
return new Snark(torrent, ip, user_port, slistener, clistener);
return new Snark(util, torrent, ip, user_port, slistener, clistener);
}
private static void usage(String s)
......@@ -552,22 +1197,13 @@ public class Snark
private static void usage()
{
System.out.println
("Usage: snark [--debug [level]] [--no-commands] [--port <port>]");
("Usage: snark [--no-commands] [--port <port>]");
System.out.println
(" [--eepproxy hostname portnum]");
System.out.println
(" [--i2cp routerHost routerPort ['name=val name=val name=val']]");
System.out.println
(" (<url>|<file>)");
System.out.println
(" --debug\tShows some extra info and stacktraces");
System.out.println
(" level\tHow much debug details to show");
System.out.println
(" \t(defaults to "
+ NOTICE + ", with --debug to "
+ INFO + ", highest level is "
+ ALL + ").");
System.out.println
(" --no-commands\tDon't read interactive commands or show usage info.");
System.out.println
......@@ -591,121 +1227,299 @@ public class Snark
(" <file> \tEither a local .torrent metainfo file to download");
System.out.println
(" \tor (with --share) a file to share.");
System.exit(-1);
}
****/
/**
* Aborts program abnormally.
* @throws RuntimeException always
*/
public void fatal(String s)
{
private void fatal(String s) throws RuntimeException {
fatal(s, null);
}
/**
* Aborts program abnormally.
* @throws RuntimeException always
*/
public void fatal(String s, Throwable t)
{
I2PSnarkUtil.instance().debug(s, ERROR, t);
//System.err.println("snark: " + s + ((t == null) ? "" : (": " + t)));
//if (debug >= INFO && t != null)
// t.printStackTrace();
private void fatal(String s, Throwable t) throws RuntimeException {
_log.error(s, t);
stopTorrent();
throw new RuntimeException("die bart die");
if (t != null)
s += ": " + t;
if (completeListener != null)
completeListener.fatal(this, s);
throw new RuntimeException(s, t);
}
/**
* Show debug info if debug is true.
* Throws a unique exception class to blame the router that can be caught by SnarkManager
* @throws RouterException always
* @since 0.9.46
*/
public static void debug(String s, int level)
{
I2PSnarkUtil.instance().debug(s, level, null);
//if (debug >= level)
// System.out.println(s);
private void fatalRouter(String s, Throwable t) throws RouterException {
_log.error(s, t);
if (!_util.getContext().isRouterContext())
System.out.println(s);
stopTorrent(true);
if (completeListener != null)
completeListener.fatal(this, s);
throw new RouterException(s, t);
}
/**
 * A unique exception class to blame the router that can be caught by SnarkManager
 * Extends RuntimeException so it propagates without checked declarations.
 * @since 0.9.46
 */
static class RouterException extends RuntimeException {
    public RouterException(String s) { super(s); }
    public RouterException(String s, Throwable t) { super(s, t); }
}
/** CoordinatorListener - this does nothing */
public void peerChange(PeerCoordinator coordinator, Peer peer)
{
    // intentional no-op; kept only to satisfy the listener interface
    // System.out.println(peer.toString());
}
// NOTE(review): apparently unused by the surviving code paths — a
// commented-out duplicate declaration exists below; candidate for removal
boolean allocating = false;
/**
 * Called when the PeerCoordinator got the MetaInfo via magnet.
 * CoordinatorListener.
 * Create the storage, tell SnarkManager, and give the storage
 * back to the coordinator.
 *
 * @throws RuntimeException via fatal()
 * @since 0.8.4
 */
public void gotMetaInfo(PeerCoordinator coordinator, MetaInfo metainfo) {
    try {
        // sanitize the advertised torrent name for use as a path component
        String base = Storage.filterName(metainfo.getName());
        File baseFile;
        if (_util.getFilesPublic())
            baseFile = new File(rootDataDir, base);
        else
            baseFile = new SecureFile(rootDataDir, base);
        if (baseFile.exists())
            throw new IOException("Data location already exists: " + baseFile);
        // The following two may throw IOE...
        storage = new Storage(_util, baseFile, metainfo, this, false);
        storage.check();
        // ... so don't set meta until here
        meta = metainfo;
        if (completeListener != null) {
            // the listener may hand back a replacement torrent name
            String newName = completeListener.gotMetaInfo(this);
            if (newName != null)
                torrent = newName;
            // else some horrible problem
        }
        coordinator.setStorage(storage);
    } catch (IOException ioe) {
        if (storage != null) {
            try { storage.close(); } catch (IOException ioee) {}
            // clear storage, we have a mess if we have non-null storage and null metainfo,
            // as on restart, Storage.reopen() will throw an ioe
            storage = null;
        }
        // TODO we're still in an inconsistent state, won't work if restarted
        // (PeerState "disconnecting seed that connects to seeds"
        fatal("Could not create file for " + getBaseInfo(), ioe);
    }
}
/**
 * Call after editing torrent.
 * Caller must ensure infohash, files, etc. did not change.
 *
 * @since 0.9.53
 */
public void replaceMetaInfo(MetaInfo metainfo) {
    meta = metainfo;
    TrackerClient client = trackerclient;
    if (client == null)
        return;
    // pick up any changed announce list
    client.reinitialize();
}
///////////// Begin StorageListener methods

//private boolean allocating = false;

/** does nothing */
public void storageCreateFile(Storage storage, String name, long length)
{
    // The active console-print lines duplicated the commented-out ones
    // below (merge residue) and contradicted the "does nothing" contract.
    //if (allocating)
    //  System.out.println(); // Done with last file.
    //System.out.print("Creating file '" + name
    //                 + "' of length " + length + ": ");
    //allocating = true;
}
// How much storage space has been allocated
// NOTE(review): only referenced from commented-out code — candidate for removal
private long allocated = 0;
/** does nothing */
public void storageAllocated(Storage storage, long length)
{
    // The active print/accumulate lines duplicated the commented-out ones
    // below (merge residue) and contradicted the "does nothing" contract.
    //allocating = true;
    //System.out.print(".");
    //allocated += length;
    //if (allocated == meta.getTotalLength())
    //  System.out.println(); // We have all the disk space we need.
}
// State of the initial storage check; the old package-private duplicates
// of these two fields were merge residue and made each a duplicate
// declaration (compile error).
private boolean allChecked;
private boolean checking;
//private boolean prechecking = true;
/**
 * StorageListener callback, once per piece.
 * During the initial file check this only flips the checking flag;
 * afterwards it logs the result and notifies the complete listener.
 */
public void storageChecked(Storage storage, int num, boolean checked)
{
    //allocating = false;
    if (!allChecked && !checking)
    {
        // Use the MetaInfo from the storage since our own might not
        // yet be setup correctly.
        //MetaInfo meta = storage.getMetaInfo();
        //if (meta != null)
        //  System.out.print("Checking existing "
        //                   + meta.getPieces()
        //                   + " pieces: ");
        checking = true;
    }
    // The old console "+"/"-" progress branch and its Snark.debug() call
    // (removed API) were merge residue and have been dropped.
    if (!checking) {
        if (_log.shouldLog(Log.INFO))
            _log.info("Got " + (checked ? "" : "BAD ") + "piece: " + num);
        if (completeListener != null)
            completeListener.gotPiece(this);
    }
}
/**
 * StorageListener callback: the initial file check finished.
 * Persists status if the storage reports changes.
 */
public void storageAllChecked(Storage storage)
{
    // old active console println duplicated the commented-out line (merge residue)
    //if (checking)
    //    System.out.println();
    allChecked = true;
    checking = false;
    if (storage.isChanged() && completeListener != null) {
        completeListener.updateStatus(this);
        // this saved the status, so reset the variables
        storage.clearChanged();
        savedUploaded = getUploaded();
    }
}
/**
 * StorageListener callback: the whole torrent has been received.
 * Notifies the complete listener and resets the saved-status markers.
 */
public void storageCompleted(Storage storage)
{
    // The old Snark.debug() call (removed API), console print, and a second
    // unbraced listener guard were merge residue and have been dropped.
    if (_log.shouldLog(Log.INFO))
        _log.info("Completely received " + torrent);
    //storage.close();
    //System.out.println("Completely received: " + torrent);
    if (completeListener != null) {
        completeListener.torrentComplete(this);
        // this saved the status, so reset the variables
        savedUploaded = getUploaded();
        storage.clearChanged();
    }
}
/** StorageListener callback: piece wanted-state changed; tell the coordinator. */
public void setWantedPieces(Storage storage)
{
    PeerCoordinator c = coordinator;
    if (c != null)
        c.setWantedPieces();
}
///////////// End StorageListener methods

/** SnarkShutdown callback unused */
public void shutdown()
{
    // Should not be necessary since all non-daemon threads should
    // have died. But in reality this does not always happen.
    // The active System.exit(0) duplicated the commented-out line below
    // (merge residue); exiting the JVM from a library callback is wrong.
    //System.exit(0);
}
public interface CompleteListener {
public void torrentComplete(Snark snark);
/**
* StorageListener and CoordinatorListener callback
* @since 0.9.2
*/
public void addMessage(String message) {
if (completeListener != null)
completeListener.addMessage(this, message);
}
/** Maintain a configurable total uploader cap
 * coordinatorListener
 */
final static int MIN_TOTAL_UPLOADERS = 4;
final static int MAX_TOTAL_UPLOADERS = 20;

/**
 * Would adding more uploaders exceed the configured global cap?
 * Sums interested uploaders across all running coordinators.
 */
public boolean overUploadLimit(int uploaders) {
    if (_peerCoordinatorSet == null || uploaders <= 0)
        return false;
    int total = 0;
    for (PeerCoordinator pc : _peerCoordinatorSet) {
        if (!pc.halted())
            total += pc.getInterestedUploaders();
    }
    int limit = _util.getMaxUploaders();
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("Total uploaders: " + total + " Limit: " + limit);
    return total > limit;
}
/**
 * A unique ID for this torrent, useful for RPC
 * @return positive value unless you wrap around
 * @since 0.9.30
 */
public int getRPCID() {
    return _rpcID;
}
/**
 * When did we start this torrent
 * For RPC
 * @return 0 if not started before. Not cleared when stopped.
 * @since 0.9.30
 */
public long getStartedTime() {
    return _startedTime;
}
/**
 * The current comment set for this torrent.
 * Not a copy.
 * Caller MUST synch on the returned object for all operations.
 *
 * Reads the field under _commentLock for a consistent snapshot.
 *
 * @return may be null if none
 * @since 0.9.31
 */
public CommentSet getComments() {
    synchronized(_commentLock) {
        return _comments;
    }
}
/**
 * Add to the current comment set for this torrent,
 * creating it if it didn't previously exist.
 *
 * @return true if the set changed
 * @since 0.9.31
 */
public boolean addComments(List<Comment> comments) {
    synchronized(_commentLock) {
        if (_comments != null) {
            // merge into the existing set under its own lock
            synchronized(_comments) {
                return _comments.addAll(comments);
            }
        }
        // first comments for this torrent
        _comments = new CommentSet(comments);
        return true;
    }
}
}